xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp (revision ae7e8a02e6e93455e026036132c4d053b2c12ad9)
1 //===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inline cost analysis.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/InlineCost.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BlockFrequencyInfo.h"
21 #include "llvm/Analysis/CFG.h"
22 #include "llvm/Analysis/CodeMetrics.h"
23 #include "llvm/Analysis/ConstantFolding.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/ProfileSummaryInfo.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/TargetTransformInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Config/llvm-config.h"
31 #include "llvm/IR/AssemblyAnnotationWriter.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/GetElementPtrTypeIterator.h"
36 #include "llvm/IR/GlobalAlias.h"
37 #include "llvm/IR/InstVisitor.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Operator.h"
40 #include "llvm/IR/PatternMatch.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/FormattedStream.h"
44 #include "llvm/Support/raw_ostream.h"
45 
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "inline-cost"
49 
50 STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
51 
52 static cl::opt<int>
53     DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
54                      cl::ZeroOrMore,
55                      cl::desc("Default amount of inlining to perform"));
56 
57 static cl::opt<bool> PrintInstructionComments(
58     "print-instruction-comments", cl::Hidden, cl::init(false),
59     cl::desc("Prints comments for instruction based on inline cost analysis"));
60 
61 static cl::opt<int> InlineThreshold(
62     "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
63     cl::desc("Control the amount of inlining to perform (default = 225)"));
64 
65 static cl::opt<int> HintThreshold(
66     "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
67     cl::desc("Threshold for inlining functions with inline hint"));
68 
69 static cl::opt<int>
70     ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
71                           cl::init(45), cl::ZeroOrMore,
72                           cl::desc("Threshold for inlining cold callsites"));
73 
74 static cl::opt<bool> InlineEnableCostBenefitAnalysis(
75     "inline-enable-cost-benefit-analysis", cl::Hidden, cl::init(false),
76     cl::desc("Enable the cost-benefit analysis for the inliner"));
77 
78 static cl::opt<int> InlineSavingsMultiplier(
79     "inline-savings-multiplier", cl::Hidden, cl::init(8), cl::ZeroOrMore,
80     cl::desc("Multiplier to multiply cycle savings by during inlining"));
81 
82 static cl::opt<int>
83     InlineSizeAllowance("inline-size-allowance", cl::Hidden, cl::init(100),
84                         cl::ZeroOrMore,
85                         cl::desc("The maximum size of a callee that gets "
86                                  "inlined without sufficient cycle savings"));
87 
88 // We introduce this threshold to help performance of instrumentation-based
89 // PGO before we actually hook up the inliner with analysis passes such as
90 // BPI and BFI.
91 static cl::opt<int> ColdThreshold(
92     "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
93     cl::desc("Threshold for inlining functions with cold attribute"));
94 
95 static cl::opt<int>
96     HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
97                          cl::ZeroOrMore,
98                          cl::desc("Threshold for hot callsites "));
99 
100 static cl::opt<int> LocallyHotCallSiteThreshold(
101     "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
102     cl::desc("Threshold for locally hot callsites "));
103 
104 static cl::opt<int> ColdCallSiteRelFreq(
105     "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
106     cl::desc("Maximum block frequency, expressed as a percentage of caller's "
107              "entry frequency, for a callsite to be cold in the absence of "
108              "profile information."));
109 
110 static cl::opt<int> HotCallSiteRelFreq(
111     "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
112     cl::desc("Minimum block frequency, expressed as a multiple of caller's "
113              "entry frequency, for a callsite to be hot in the absence of "
114              "profile information."));
115 
116 static cl::opt<bool> OptComputeFullInlineCost(
117     "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
118     cl::desc("Compute the full inline cost of a call site even when the cost "
119              "exceeds the threshold."));
120 
121 static cl::opt<bool> InlineCallerSupersetNoBuiltin(
122     "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
123     cl::ZeroOrMore,
124     cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
125              "attributes."));
126 
127 static cl::opt<bool> DisableGEPConstOperand(
128     "disable-gep-const-evaluation", cl::Hidden, cl::init(false),
129     cl::desc("Disables evaluation of GetElementPtr with constant operands"));
130 
131 namespace {
132 class InlineCostCallAnalyzer;
133 
134 // This struct is used to store information about the inline cost of a
135 // particular instruction.
136 struct InstructionCostDetail {
137   int CostBefore = 0;
138   int CostAfter = 0;
139   int ThresholdBefore = 0;
140   int ThresholdAfter = 0;
141 
142   int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }
143 
144   int getCostDelta() const { return CostAfter - CostBefore; }
145 
146   bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
147 };
148 
149 class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
150 private:
151   InlineCostCallAnalyzer *const ICCA;
152 
153 public:
154   InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
155   virtual void emitInstructionAnnot(const Instruction *I,
156                                     formatted_raw_ostream &OS) override;
157 };
158 
159 /// Carry out call site analysis, in order to evaluate inlinability.
160 /// NOTE: the type is currently used as an implementation detail of functions such
161 /// as llvm::getInlineCost. Note the function_ref constructor parameters - the
162 /// expectation is that they come from the outer scope, from the wrapper
163 /// functions. If we want to support constructing CallAnalyzer objects where
164 /// lambdas are provided inline at construction, or where the object needs to
165 /// otherwise survive past the scope of the provided functions, we need to
166 /// revisit the argument types.
167 class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
168   typedef InstVisitor<CallAnalyzer, bool> Base;
169   friend class InstVisitor<CallAnalyzer, bool>;
170 
171 protected:
172   virtual ~CallAnalyzer() {}
173   /// The TargetTransformInfo available for this compilation.
174   const TargetTransformInfo &TTI;
175 
176   /// Getter for the cache of @llvm.assume intrinsics.
177   function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
178 
179   /// Getter for BlockFrequencyInfo
180   function_ref<BlockFrequencyInfo &(Function &)> GetBFI;
181 
182   /// Profile summary information.
183   ProfileSummaryInfo *PSI;
184 
185   /// The called function.
186   Function &F;
187 
188   // Cache the DataLayout since we use it a lot.
189   const DataLayout &DL;
190 
191   /// The OptimizationRemarkEmitter available for this compilation.
192   OptimizationRemarkEmitter *ORE;
193 
194   /// The candidate callsite being analyzed. Please do not use this to do
195   /// analysis in the caller function; we want the inline cost query to be
196   /// easily cacheable. Instead, use the cover function paramHasAttr.
197   CallBase &CandidateCall;
198 
199   /// Extension points for handling callsite features.
200   /// Called before a basic block is analyzed.
201   virtual void onBlockStart(const BasicBlock *BB) {}
202 
203   /// Called after a basic block was analyzed.
204   virtual void onBlockAnalyzed(const BasicBlock *BB) {}
205 
206   /// Called before an instruction was analyzed
207   virtual void onInstructionAnalysisStart(const Instruction *I) {}
208 
209   /// Called after an instruction was analyzed
210   virtual void onInstructionAnalysisFinish(const Instruction *I) {}
211 
212   /// Called at the end of the analysis of the callsite. Return the outcome of
213   /// the analysis, i.e. 'InlineResult::success()' if the inlining may happen,
214   /// or the reason it can't.
215   virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
216   /// Called when we're about to start processing a basic block, and every time
217   /// we are done processing an instruction. Return true if there is no point in
218   /// continuing the analysis (e.g. we've already determined the call site is
219   /// too expensive to inline).
220   virtual bool shouldStop() { return false; }
221 
222   /// Called before the analysis of the callee body starts (with callsite
223   /// contexts propagated).  It checks callsite-specific information. Return a
224   /// reason the analysis can't continue if that's the case, or a success
225   /// result if it may continue.
226   virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
227   /// Called if the analysis engine decides SROA cannot be done for the given
228   /// alloca.
229   virtual void onDisableSROA(AllocaInst *Arg) {}
230 
231   /// Called when the analysis engine determines load elimination won't happen.
232   virtual void onDisableLoadElimination() {}
233 
234   /// Called to account for a call.
235   virtual void onCallPenalty() {}
236 
237   /// Called to account for the expectation the inlining would result in a load
238   /// elimination.
239   virtual void onLoadEliminationOpportunity() {}
240 
241   /// Called to account for the cost of argument setup for the Call in the
242   /// callee's body (not the callsite currently under analysis).
243   virtual void onCallArgumentSetup(const CallBase &Call) {}
244 
245   /// Called to account for a load relative intrinsic.
246   virtual void onLoadRelativeIntrinsic() {}
247 
248   /// Called to account for a lowered call.
249   virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
250   }
251 
252   /// Account for a jump table of the given size. Return false to stop further
253   /// processing of the switch instruction.
254   virtual bool onJumpTable(unsigned JumpTableSize) { return true; }
255 
256   /// Account for a case cluster of given size. Return false to stop further
257   /// processing of the instruction.
258   virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }
259 
260   /// Called at the end of processing a switch instruction, with the given
261   /// number of case clusters.
262   virtual void onFinalizeSwitch(unsigned JumpTableSize,
263                                 unsigned NumCaseCluster) {}
264 
265   /// Called to account for any other instruction not specifically accounted
266   /// for.
267   virtual void onMissedSimplification() {}
268 
269   /// Start accounting potential benefits due to SROA for the given alloca.
270   virtual void onInitializeSROAArg(AllocaInst *Arg) {}
271 
272   /// Account SROA savings for the AllocaInst value.
273   virtual void onAggregateSROAUse(AllocaInst *V) {}
274 
275   bool handleSROA(Value *V, bool DoNotDisable) {
276     // Check for SROA candidates in comparisons.
277     if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
278       if (DoNotDisable) {
279         onAggregateSROAUse(SROAArg);
280         return true;
281       }
282       disableSROAForArg(SROAArg);
283     }
284     return false;
285   }
286 
287   bool IsCallerRecursive = false;
288   bool IsRecursiveCall = false;
289   bool ExposesReturnsTwice = false;
290   bool HasDynamicAlloca = false;
291   bool ContainsNoDuplicateCall = false;
292   bool HasReturn = false;
293   bool HasIndirectBr = false;
294   bool HasUninlineableIntrinsic = false;
295   bool InitsVargArgs = false;
296 
297   /// Number of bytes allocated statically by the callee.
298   uint64_t AllocatedSize = 0;
299   unsigned NumInstructions = 0;
300   unsigned NumVectorInstructions = 0;
301 
302   /// While we walk the potentially-inlined instructions, we build up and
303   /// maintain a mapping of simplified values specific to this callsite. The
304   /// idea is to propagate any special information we have about arguments to
305   /// this call through the inlinable section of the function, and account for
306   /// likely simplifications post-inlining. The most important aspect we track
307   /// is CFG altering simplifications -- when we prove a basic block dead, that
308   /// can cause dramatic shifts in the cost of inlining a function.
309   DenseMap<Value *, Constant *> SimplifiedValues;
310 
311   /// Keep track of the values which map back (through function arguments) to
312   /// allocas on the caller stack which could be simplified through SROA.
313   DenseMap<Value *, AllocaInst *> SROAArgValues;
314 
315   /// Keep track of Allocas for which we believe we may get SROA optimization.
316   DenseSet<AllocaInst *> EnabledSROAAllocas;
317 
318   /// Keep track of values which map to a pointer base and constant offset.
319   DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;
320 
321   /// Keep track of dead blocks due to the constant arguments.
322   SetVector<BasicBlock *> DeadBlocks;
323 
324   /// The mapping of the blocks to their known unique successors due to the
325   /// constant arguments.
326   DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;
327 
328   /// Model the elimination of repeated loads that is expected to happen
329   /// whenever we simplify away the stores that would otherwise cause them to be
330   /// loads.
331   bool EnableLoadElimination;
332   SmallPtrSet<Value *, 16> LoadAddrSet;
333 
334   AllocaInst *getSROAArgForValueOrNull(Value *V) const {
335     auto It = SROAArgValues.find(V);
336     if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
337       return nullptr;
338     return It->second;
339   }
340 
341   // Custom simplification helper routines.
342   bool isAllocaDerivedArg(Value *V);
343   void disableSROAForArg(AllocaInst *SROAArg);
344   void disableSROA(Value *V);
345   void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
346   void disableLoadElimination();
347   bool isGEPFree(GetElementPtrInst &GEP);
348   bool canFoldInboundsGEP(GetElementPtrInst &I);
349   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
350   bool simplifyCallSite(Function *F, CallBase &Call);
351   template <typename Callable>
352   bool simplifyInstruction(Instruction &I, Callable Evaluate);
353   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
354 
355   /// Return true if the given argument to the function being considered for
356   /// inlining has the given attribute set either at the call site or the
357   /// function declaration.  Primarily used to inspect call site specific
358   /// attributes since these can be more precise than the ones on the callee
359   /// itself.
360   bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
361 
362   /// Return true if the given value is known non null within the callee if
363   /// inlined through this particular callsite.
364   bool isKnownNonNullInCallee(Value *V);
365 
366   /// Return true if size growth is allowed when inlining the callee at \p Call.
367   bool allowSizeGrowth(CallBase &Call);
368 
369   // Custom analysis routines.
370   InlineResult analyzeBlock(BasicBlock *BB,
371                             SmallPtrSetImpl<const Value *> &EphValues);
372 
373   // Disable several entry points to the visitor so we don't accidentally use
374   // them by declaring but not defining them here.
375   void visit(Module *);
376   void visit(Module &);
377   void visit(Function *);
378   void visit(Function &);
379   void visit(BasicBlock *);
380   void visit(BasicBlock &);
381 
382   // Provide base case for our instruction visit.
383   bool visitInstruction(Instruction &I);
384 
385   // Our visit overrides.
386   bool visitAlloca(AllocaInst &I);
387   bool visitPHI(PHINode &I);
388   bool visitGetElementPtr(GetElementPtrInst &I);
389   bool visitBitCast(BitCastInst &I);
390   bool visitPtrToInt(PtrToIntInst &I);
391   bool visitIntToPtr(IntToPtrInst &I);
392   bool visitCastInst(CastInst &I);
393   bool visitUnaryInstruction(UnaryInstruction &I);
394   bool visitCmpInst(CmpInst &I);
395   bool visitSub(BinaryOperator &I);
396   bool visitBinaryOperator(BinaryOperator &I);
397   bool visitFNeg(UnaryOperator &I);
398   bool visitLoad(LoadInst &I);
399   bool visitStore(StoreInst &I);
400   bool visitExtractValue(ExtractValueInst &I);
401   bool visitInsertValue(InsertValueInst &I);
402   bool visitCallBase(CallBase &Call);
403   bool visitReturnInst(ReturnInst &RI);
404   bool visitBranchInst(BranchInst &BI);
405   bool visitSelectInst(SelectInst &SI);
406   bool visitSwitchInst(SwitchInst &SI);
407   bool visitIndirectBrInst(IndirectBrInst &IBI);
408   bool visitResumeInst(ResumeInst &RI);
409   bool visitCleanupReturnInst(CleanupReturnInst &RI);
410   bool visitCatchReturnInst(CatchReturnInst &RI);
411   bool visitUnreachableInst(UnreachableInst &I);
412 
413 public:
414   CallAnalyzer(
415       Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
416       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
417       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
418       ProfileSummaryInfo *PSI = nullptr,
419       OptimizationRemarkEmitter *ORE = nullptr)
420       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
421         PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
422         CandidateCall(Call), EnableLoadElimination(true) {}
423 
424   InlineResult analyze();
425 
426   Optional<Constant*> getSimplifiedValue(Instruction *I) {
427     if (SimplifiedValues.find(I) != SimplifiedValues.end())
428       return SimplifiedValues[I];
429     return None;
430   }
431 
432   // Keep a bunch of stats about the cost savings found so we can print them
433   // out when debugging.
434   unsigned NumConstantArgs = 0;
435   unsigned NumConstantOffsetPtrArgs = 0;
436   unsigned NumAllocaArgs = 0;
437   unsigned NumConstantPtrCmps = 0;
438   unsigned NumConstantPtrDiffs = 0;
439   unsigned NumInstructionsSimplified = 0;
440 
441   void dump();
442 };
443 
444 /// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
445 /// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
446 class InlineCostCallAnalyzer final : public CallAnalyzer {
447   const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
448   const bool ComputeFullInlineCost;
449   int LoadEliminationCost = 0;
450   /// Bonus to be applied when percentage of vector instructions in callee is
451   /// high (see more details in updateThreshold).
452   int VectorBonus = 0;
453   /// Bonus to be applied when the callee has only one reachable basic block.
454   int SingleBBBonus = 0;
455 
456   /// Tunable parameters that control the analysis.
457   const InlineParams &Params;
458 
459   // This DenseMap stores the delta change in cost and threshold after
460   // accounting for the given instruction. The map is filled only when the
461   // flag PrintInstructionComments is on.
462   DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;
463 
464   /// Upper bound for the inlining cost. Bonuses are being applied to account
465   /// for speculative "expected profit" of the inlining decision.
466   int Threshold = 0;
467 
468   /// Attempt to evaluate indirect calls to boost their inline cost.
469   const bool BoostIndirectCalls;
470 
471   /// Ignore the threshold when finalizing analysis.
472   const bool IgnoreThreshold;
473 
474   // True if the cost-benefit-analysis-based inliner is enabled.
475   const bool CostBenefitAnalysisEnabled;
476 
477   /// Inlining cost measured in abstract units; it accounts for all the
478   /// instructions expected to be executed for a given function invocation.
479   /// Instructions that are statically proven to be dead based on call-site
480   /// arguments are not counted here.
481   int Cost = 0;
482 
483   // The cumulative cost at the beginning of the basic block being analyzed.  At
484   // the end of analyzing each basic block, "Cost - CostAtBBStart" represents
485   // the size of that basic block.
486   int CostAtBBStart = 0;
487 
488   // The static size of live but cold basic blocks.  This is "static" in the
489   // sense that it's not weighted by profile counts at all.
490   int ColdSize = 0;
491 
492   bool SingleBB = true;
493 
494   unsigned SROACostSavings = 0;
495   unsigned SROACostSavingsLost = 0;
496 
497   /// The mapping of caller Alloca values to their accumulated cost savings. If
498   /// we have to disable SROA for one of the allocas, this tells us how much
499   /// cost must be added.
500   DenseMap<AllocaInst *, int> SROAArgCosts;
501 
502   /// Return true if \p Call is a cold callsite.
503   bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
504 
505   /// Update Threshold based on callsite properties such as callee
506   /// attributes and callee hotness for PGO builds. The Callee is explicitly
507   /// passed to support analyzing indirect calls whose target is inferred by
508   /// analysis.
509   void updateThreshold(CallBase &Call, Function &Callee);
510   /// Return a higher threshold if \p Call is a hot callsite.
511   Optional<int> getHotCallSiteThreshold(CallBase &Call,
512                                         BlockFrequencyInfo *CallerBFI);
513 
514   /// Handle a capped 'int' increment for Cost.
515   void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
516     assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
517     Cost = (int)std::min(UpperBound, Cost + Inc);
518   }
519 
520   void onDisableSROA(AllocaInst *Arg) override {
521     auto CostIt = SROAArgCosts.find(Arg);
522     if (CostIt == SROAArgCosts.end())
523       return;
524     addCost(CostIt->second);
525     SROACostSavings -= CostIt->second;
526     SROACostSavingsLost += CostIt->second;
527     SROAArgCosts.erase(CostIt);
528   }
529 
530   void onDisableLoadElimination() override {
531     addCost(LoadEliminationCost);
532     LoadEliminationCost = 0;
533   }
534   void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
535   void onCallArgumentSetup(const CallBase &Call) override {
536     // Pay the price of the argument setup. We account for an average of one
537     // instruction per call argument setup here.
538     addCost(Call.arg_size() * InlineConstants::InstrCost);
539   }
540   void onLoadRelativeIntrinsic() override {
541     // This is normally lowered to 4 LLVM instructions.
542     addCost(3 * InlineConstants::InstrCost);
543   }
544   void onLoweredCall(Function *F, CallBase &Call,
545                      bool IsIndirectCall) override {
546     // We account for an average of one instruction per call argument setup here.
547     addCost(Call.arg_size() * InlineConstants::InstrCost);
548 
549     // If we have a constant that we are calling as a function, we can peer
550     // through it and see the function target. This happens not infrequently
551     // during devirtualization and so we want to give it a hefty bonus for
552     // inlining, but cap that bonus in the event that inlining wouldn't pan out.
553     // Pretend to inline the function, with a custom threshold.
554     if (IsIndirectCall && BoostIndirectCalls) {
555       auto IndirectCallParams = Params;
556       IndirectCallParams.DefaultThreshold =
557           InlineConstants::IndirectCallThreshold;
558       /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
559       /// to instantiate the derived class.
560       InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
561                                 GetAssumptionCache, GetBFI, PSI, ORE, false);
562       if (CA.analyze().isSuccess()) {
563         // We were able to inline the indirect call! Subtract the cost from the
564         // threshold to get the bonus we want to apply, but don't go below zero.
565         Cost -= std::max(0, CA.getThreshold() - CA.getCost());
566       }
567     } else
568       // Otherwise simply add the cost for merely making the call.
569       addCost(InlineConstants::CallPenalty);
570   }
571 
572   void onFinalizeSwitch(unsigned JumpTableSize,
573                         unsigned NumCaseCluster) override {
574     // If suitable for a jump table, consider the cost for the table size and
575     // branch to destination.
576     // Maximum valid cost increased in this function.
577     if (JumpTableSize) {
578       int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
579                        4 * InlineConstants::InstrCost;
580 
581       addCost(JTCost, (int64_t)CostUpperBound);
582       return;
583     }
584     // Considering forming a binary search, we should find the number of nodes
585     // which is same as the number of comparisons when lowered. For a given
586     // number of clusters, n, we can define a recursive function, f(n), to find
587     // the number of nodes in the tree. The recursion is :
588     // f(n) = 1 + f(n/2) + f (n - n/2), when n > 3,
589     // and f(n) = n, when n <= 3.
590     // This will lead to a binary tree where the leaf should be either f(2) or
591     // f(3) when n > 3.  So, the number of comparisons from leaves should be n,
592     // while the number of non-leaf comparisons should be:
593     //   2^(log2(n) - 1) - 1
594     //   = 2^log2(n) * 2^-1 - 1
595     //   = n / 2 - 1.
596     // Considering comparisons from leaf and non-leaf nodes, we can estimate the
597     // number of comparisons in a simple closed form :
598     //   n + n / 2 - 1 = n * 3 / 2 - 1
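    // Illustrative example (not from the source): with NumCaseCluster == 8 the
    // estimate below is 8 + 8/2 - 1 = 11 comparisons, so the switch is charged
    // roughly 11 * 2 * InlineConstants::InstrCost.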
599     if (NumCaseCluster <= 3) {
600       // Suppose a comparison includes one compare and one conditional branch.
601       addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
602       return;
603     }
604 
605     int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
606     int64_t SwitchCost =
607         ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
608 
609     addCost(SwitchCost, (int64_t)CostUpperBound);
610   }
611   void onMissedSimplification() override {
612     addCost(InlineConstants::InstrCost);
613   }
614 
615   void onInitializeSROAArg(AllocaInst *Arg) override {
616     assert(Arg != nullptr &&
617            "Should not initialize SROA costs for null value.");
618     SROAArgCosts[Arg] = 0;
619   }
620 
621   void onAggregateSROAUse(AllocaInst *SROAArg) override {
622     auto CostIt = SROAArgCosts.find(SROAArg);
623     assert(CostIt != SROAArgCosts.end() &&
624            "expected this argument to have a cost");
625     CostIt->second += InlineConstants::InstrCost;
626     SROACostSavings += InlineConstants::InstrCost;
627   }
628 
629   void onBlockStart(const BasicBlock *BB) override { CostAtBBStart = Cost; }
630 
631   void onBlockAnalyzed(const BasicBlock *BB) override {
632     if (CostBenefitAnalysisEnabled) {
633       // Keep track of the static size of live but cold basic blocks.  For now,
634       // we define a cold basic block to be one that's never executed.
635       assert(GetBFI && "GetBFI must be available");
636       BlockFrequencyInfo *BFI = &(GetBFI(F));
637       assert(BFI && "BFI must be available");
638       auto ProfileCount = BFI->getBlockProfileCount(BB);
639       assert(ProfileCount.hasValue());
640       if (ProfileCount.getValue() == 0)
641         ColdSize += Cost - CostAtBBStart;
642     }
643 
644     auto *TI = BB->getTerminator();
645     // If we had any successors at this point, then post-inlining is likely to
646     // have them as well. Note that we assume any basic blocks which existed
647     // due to branches or switches which folded above will also fold after
648     // inlining.
649     if (SingleBB && TI->getNumSuccessors() > 1) {
650       // Take off the bonus we applied to the threshold.
651       Threshold -= SingleBBBonus;
652       SingleBB = false;
653     }
654   }
655 
656   void onInstructionAnalysisStart(const Instruction *I) override {
657     // This function is called to store the initial cost of inlining before
658     // the given instruction was assessed.
659     if (!PrintInstructionComments)
660       return;
661     InstructionCostDetailMap[I].CostBefore = Cost;
662     InstructionCostDetailMap[I].ThresholdBefore = Threshold;
663   }
664 
665   void onInstructionAnalysisFinish(const Instruction *I) override {
666     // This function is called to find new values of cost and threshold after
667     // the instruction has been assessed.
668     if (!PrintInstructionComments)
669       return;
670     InstructionCostDetailMap[I].CostAfter = Cost;
671     InstructionCostDetailMap[I].ThresholdAfter = Threshold;
672   }
673 
674   bool isCostBenefitAnalysisEnabled() {
675     if (!InlineEnableCostBenefitAnalysis)
676       return false;
677 
678     if (!PSI || !PSI->hasProfileSummary())
679       return false;
680 
681     if (!GetBFI)
682       return false;
683 
684     auto *Caller = CandidateCall.getParent()->getParent();
685     if (!Caller->getEntryCount())
686       return false;
687 
688     BlockFrequencyInfo *CallerBFI = &(GetBFI(*Caller));
689     if (!CallerBFI)
690       return false;
691 
692     // For now, limit to hot call site.
693     if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
694       return false;
695 
696     if (!F.getEntryCount())
697       return false;
698 
699     BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
700     if (!CalleeBFI)
701       return false;
702 
703     return true;
704   }
705 
706   // Determine whether we should inline the given call site, taking into account
707   // both the size cost and the cycle savings.  Return None if we don't have
708   // sufficient profiling information to decide.
709   Optional<bool> costBenefitAnalysis() {
710     if (!CostBenefitAnalysisEnabled)
711       return None;
712 
713     // buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to 0
714     // for the prelink phase of the AutoFDO + ThinLTO build.  Honor the logic by
715     // falling back to the cost-based metric.
716     // TODO: Improve this hacky condition.
717     if (Threshold == 0)
718       return None;
719 
720     assert(GetBFI);
721     BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
722     assert(CalleeBFI);
723 
724     // The cycle savings expressed as the sum of InlineConstants::InstrCost
725     // multiplied by the estimated dynamic count of each instruction we can
726     // avoid.  Savings come from the call site cost, such as argument setup and
727     // the call instruction, as well as the instructions that are folded.
728     //
729     // We use 128-bit APInt here to avoid potential overflow.  This variable
730     // should stay well below 10^24 (or 2^80) in practice.  This "worst" case
731     // assumes that we can avoid or fold a billion instructions, each with a
732     // profile count of 10^15 -- roughly the number of cycles for a 24-hour
733     // period on a 4GHz machine.
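    // (A billion instructions with a count of 10^15 each gives about 10^24 in
    // total, i.e. roughly 2^80, which fits comfortably in 128 bits.)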
734     APInt CycleSavings(128, 0);
735 
736     for (auto &BB : F) {
737       APInt CurrentSavings(128, 0);
738       for (auto &I : BB) {
739         if (BranchInst *BI = dyn_cast<BranchInst>(&I)) {
740           // Count a conditional branch as savings if it becomes unconditional.
741           if (BI->isConditional() &&
742               dyn_cast_or_null<ConstantInt>(
743                   SimplifiedValues.lookup(BI->getCondition()))) {
744             CurrentSavings += InlineConstants::InstrCost;
745           }
746         } else if (Value *V = dyn_cast<Value>(&I)) {
747           // Count an instruction as savings if we can fold it.
748           if (SimplifiedValues.count(V)) {
749             CurrentSavings += InlineConstants::InstrCost;
750           }
751         }
752         // TODO: Consider other forms of savings like switch statements,
753         // indirect calls becoming direct, SROACostSavings, LoadEliminationCost,
754         // etc.
755       }
756 
757       auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
758       assert(ProfileCount.hasValue());
759       CurrentSavings *= ProfileCount.getValue();
760       CycleSavings += CurrentSavings;
761     }
762 
763     // Compute the cycle savings per call.
764     auto EntryProfileCount = F.getEntryCount();
765     assert(EntryProfileCount.hasValue());
766     auto EntryCount = EntryProfileCount.getCount();
767     CycleSavings += EntryCount / 2;
768     CycleSavings = CycleSavings.udiv(EntryCount);
769 
770     // Compute the total savings for the call site.
771     auto *CallerBB = CandidateCall.getParent();
772     BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
773     CycleSavings += getCallsiteCost(this->CandidateCall, DL);
774     CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();
775 
776     // Remove the cost of the cold basic blocks.
777     int Size = Cost - ColdSize;
778 
779     // Allow tiny callees to be inlined regardless of whether they meet the
780     // savings threshold.
781     Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;
782 
783     // Return true if the savings justify the cost of inlining.  Specifically,
784     // we evaluate the following inequality:
785     //
786     //  CycleSavings      PSI->getOrCompHotCountThreshold()
787     // -------------- >= -----------------------------------
788     //       Size              InlineSavingsMultiplier
789     //
790     // Note that the left hand side is specific to a call site.  The right hand
791     // side is a constant for the entire executable.
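    // Illustrative example (numbers are made up): with CycleSavings == 200000,
    // Size == 100, the default InlineSavingsMultiplier of 8, and a hot count
    // threshold of 10000, the check below compares LHS = 200000 * 8 = 1.6M
    // against RHS = 10000 * 100 = 1M, so the savings justify inlining.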
792     APInt LHS = CycleSavings;
793     LHS *= InlineSavingsMultiplier;
794     APInt RHS(128, PSI->getOrCompHotCountThreshold());
795     RHS *= Size;
796     return LHS.uge(RHS);
797   }
798 
799   InlineResult finalizeAnalysis() override {
800     // Loops generally act a lot like calls in that they act like barriers to
801     // movement, require a certain amount of setup, etc. So when optimising for
802     // size, we penalise any call sites that perform loops. We do this after all
803     // other costs here, so will likely only be dealing with relatively small
804     // functions (and hence DT and LI will hopefully be cheap).
805     auto *Caller = CandidateCall.getFunction();
806     if (Caller->hasMinSize()) {
807       DominatorTree DT(F);
808       LoopInfo LI(DT);
809       int NumLoops = 0;
810       for (Loop *L : LI) {
811         // Ignore loops that will not be executed
812         if (DeadBlocks.count(L->getHeader()))
813           continue;
814         NumLoops++;
815       }
816       addCost(NumLoops * InlineConstants::CallPenalty);
817     }
818 
819     // We applied the maximum possible vector bonus at the beginning. Now,
820     // subtract the excess bonus, if any, from the Threshold before
821     // comparing against Cost.
822     if (NumVectorInstructions <= NumInstructions / 10)
823       Threshold -= VectorBonus;
824     else if (NumVectorInstructions <= NumInstructions / 2)
825       Threshold -= VectorBonus / 2;
826 
827     if (auto Result = costBenefitAnalysis()) {
828       if (Result.getValue())
829         return InlineResult::success();
830       else
831         return InlineResult::failure("Cost over threshold.");
832     }
833 
834     if (IgnoreThreshold || Cost < std::max(1, Threshold))
835       return InlineResult::success();
836     return InlineResult::failure("Cost over threshold.");
837   }
838   bool shouldStop() override {
839     // Bail out the moment we cross the threshold. This means we'll under-count
840     // the cost, but only when undercounting doesn't matter.
841     return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
842   }
843 
844   void onLoadEliminationOpportunity() override {
845     LoadEliminationCost += InlineConstants::InstrCost;
846   }
847 
848   InlineResult onAnalysisStart() override {
849     // Perform some tweaks to the cost and threshold based on the direct
850     // callsite information.
851 
852     // We want to more aggressively inline vector-dense kernels, so up the
853     // threshold, and we'll lower it if the % of vector instructions gets too
854     // low. Note that these bonuses are somewhat arbitrary and evolved over
855     // time by accident as much as because they are principled bonuses.
856     //
857     // FIXME: It would be nice to remove all such bonuses. At least it would be
858     // nice to base the bonus values on something more scientific.
859     assert(NumInstructions == 0);
860     assert(NumVectorInstructions == 0);
861 
862     // Update the threshold based on callsite properties
863     updateThreshold(CandidateCall, F);
864 
865     // While Threshold depends on commandline options that can take negative
866     // values, we want to enforce the invariant that the computed threshold and
867     // bonuses are non-negative.
868     assert(Threshold >= 0);
869     assert(SingleBBBonus >= 0);
870     assert(VectorBonus >= 0);
871 
872     // Speculatively apply all possible bonuses to Threshold. If cost exceeds
873     // this Threshold any time, and cost cannot decrease, we can stop processing
874     // the rest of the function body.
875     Threshold += (SingleBBBonus + VectorBonus);
876 
877     // Give out bonuses for the callsite, as the instructions setting them up
878     // will be gone after inlining.
879     addCost(-getCallsiteCost(this->CandidateCall, DL));
880 
881     // If this function uses the coldcc calling convention, prefer not to inline
882     // it.
883     if (F.getCallingConv() == CallingConv::Cold)
884       Cost += InlineConstants::ColdccPenalty;
885 
886     // Check if we're done. This can happen due to bonuses and penalties.
887     if (Cost >= Threshold && !ComputeFullInlineCost)
888       return InlineResult::failure("high cost");
889 
890     return InlineResult::success();
891   }
892 
893 public:
894   InlineCostCallAnalyzer(
895       Function &Callee, CallBase &Call, const InlineParams &Params,
896       const TargetTransformInfo &TTI,
897       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
898       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
899       ProfileSummaryInfo *PSI = nullptr,
900       OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
901       bool IgnoreThreshold = false)
902       : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
903         ComputeFullInlineCost(OptComputeFullInlineCost ||
904                               Params.ComputeFullInlineCost || ORE ||
905                               isCostBenefitAnalysisEnabled()),
906         Params(Params), Threshold(Params.DefaultThreshold),
907         BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
908         CostBenefitAnalysisEnabled(isCostBenefitAnalysisEnabled()),
909         Writer(this) {}
910 
911   /// Annotation Writer for instruction details
912   InlineCostAnnotationWriter Writer;
913 
914   void dump();
915 
916   // Prints the same analysis as dump(), but its definition is not dependent
917   // on the build.
918   void print();
919 
920   Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
921     if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
922       return InstructionCostDetailMap[I];
923     return None;
924   }
925 
926   virtual ~InlineCostCallAnalyzer() {}
927   int getThreshold() { return Threshold; }
928   int getCost() { return Cost; }
929 };
930 } // namespace
931 
932 /// Test whether the given value is an Alloca-derived function argument.
933 bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
934   return SROAArgValues.count(V);
935 }
936 
937 void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
938   onDisableSROA(SROAArg);
939   EnabledSROAAllocas.erase(SROAArg);
940   disableLoadElimination();
941 }
942 
943 void InlineCostAnnotationWriter::emitInstructionAnnot(const Instruction *I,
944                                                 formatted_raw_ostream &OS) {
945   // The cost of inlining the given instruction is always printed.
946   // The threshold delta is printed only when it is non-zero, which happens
947   // when we decide to give a bonus at a particular instruction.
948   Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
949   if (!Record)
950     OS << "; No analysis for the instruction";
951   else {
952     OS << "; cost before = " << Record->CostBefore
953        << ", cost after = " << Record->CostAfter
954        << ", threshold before = " << Record->ThresholdBefore
955        << ", threshold after = " << Record->ThresholdAfter << ", ";
956     OS << "cost delta = " << Record->getCostDelta();
957     if (Record->hasThresholdChanged())
958       OS << ", threshold delta = " << Record->getThresholdDelta();
959   }
960   auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
961   if (C) {
962     OS << ", simplified to ";
963     C.getValue()->print(OS, true);
964   }
965   OS << "\n";
966 }
967 
968 /// If 'V' maps to a SROA candidate, disable SROA for it.
969 void CallAnalyzer::disableSROA(Value *V) {
970   if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
971     disableSROAForArg(SROAArg);
972   }
973 }
974 
975 void CallAnalyzer::disableLoadElimination() {
976   if (EnableLoadElimination) {
977     onDisableLoadElimination();
978     EnableLoadElimination = false;
979   }
980 }
981 
982 /// Accumulate a constant GEP offset into an APInt if possible.
983 ///
984 /// Returns false if unable to compute the offset for any reason. Respects any
985 /// simplified values known during the analysis of this callsite.
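///
/// Illustrative example (not from the source), assuming a typical 64-bit data
/// layout: for a GEP such as
///   %p = getelementptr inbounds {i32, i64}, {i32, i64}* %base, i32 0, i32 1
/// the struct index adds the field offset of element 1 (8 bytes here, given
/// i64 alignment), so Offset is incremented by 8.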
986 bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
987   unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
988   assert(IntPtrWidth == Offset.getBitWidth());
989 
990   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
991        GTI != GTE; ++GTI) {
992     ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
993     if (!OpC)
994       if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
995         OpC = dyn_cast<ConstantInt>(SimpleOp);
996     if (!OpC)
997       return false;
998     if (OpC->isZero())
999       continue;
1000 
1001     // Handle a struct index, which adds its field offset to the pointer.
1002     if (StructType *STy = GTI.getStructTypeOrNull()) {
1003       unsigned ElementIdx = OpC->getZExtValue();
1004       const StructLayout *SL = DL.getStructLayout(STy);
1005       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
1006       continue;
1007     }
1008 
1009     APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
1010     Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
1011   }
1012   return true;
1013 }
1014 
1015 /// Use TTI to check whether a GEP is free.
1016 ///
1017 /// Respects any simplified values known during the analysis of this callsite.
1018 bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
1019   SmallVector<Value *, 4> Operands;
1020   Operands.push_back(GEP.getOperand(0));
1021   for (const Use &Op : GEP.indices())
1022     if (Constant *SimpleOp = SimplifiedValues.lookup(Op))
1023       Operands.push_back(SimpleOp);
1024     else
1025       Operands.push_back(Op);
1026   return TargetTransformInfo::TCC_Free ==
1027          TTI.getUserCost(&GEP, Operands,
1028                          TargetTransformInfo::TCK_SizeAndLatency);
1029 }
1030 
1031 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
1032   // Check whether inlining will turn a dynamic alloca into a static
1033   // alloca and handle that case.
1034   if (I.isArrayAllocation()) {
1035     Constant *Size = SimplifiedValues.lookup(I.getArraySize());
1036     if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
1037       // Sometimes a dynamic alloca could be converted into a static alloca
1038       // after this constant prop, and become a huge static alloca on an
1039       // unconditional CFG path. Avoid inlining if this is going to happen above
1040       // a threshold.
1041       // FIXME: If the threshold is removed or lowered too much, we could end up
1042       // being too pessimistic and prevent inlining non-problematic code. This
1043       // could result in unintended perf regressions. A better overall strategy
1044       // is needed to track stack usage during inlining.
1045       Type *Ty = I.getAllocatedType();
1046       AllocatedSize = SaturatingMultiplyAdd(
1047           AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getKnownMinSize(),
1048           AllocatedSize);
1049       if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline) {
1050         HasDynamicAlloca = true;
1051         return false;
1052       }
1053       return Base::visitAlloca(I);
1054     }
1055   }
1056 
1057   // Accumulate the allocated size.
1058   if (I.isStaticAlloca()) {
1059     Type *Ty = I.getAllocatedType();
1060     AllocatedSize =
1061         SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
1062   }
1063 
1064   // We will happily inline static alloca instructions.
1065   if (I.isStaticAlloca())
1066     return Base::visitAlloca(I);
1067 
1068   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
1069   // a variety of reasons, and so we would like to not inline them into
1070   // functions which don't currently have a dynamic alloca. This simply
1071   // disables inlining altogether in the presence of a dynamic alloca.
1072   HasDynamicAlloca = true;
1073   return false;
1074 }
1075 
1076 bool CallAnalyzer::visitPHI(PHINode &I) {
1077   // FIXME: We need to propagate SROA *disabling* through phi nodes, even
1078   // though we don't want to propagate its bonuses. The idea is to disable
1079   // SROA if it *might* be used in an inappropriate manner.
1080 
1081   // Phi nodes are always zero-cost.
1082   // FIXME: Pointer sizes may differ between different address spaces, so do we
1083   // need to use correct address space in the call to getPointerSizeInBits here?
1084   // Or could we skip the getPointerSizeInBits call completely? As far as I can
1085   // see the ZeroOffset is used as a dummy value, so we can probably use any
1086   // bit width for the ZeroOffset?
1087   APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
1088   bool CheckSROA = I.getType()->isPointerTy();
1089 
1090   // Track the constant or pointer with constant offset we've seen so far.
1091   Constant *FirstC = nullptr;
1092   std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
1093   Value *FirstV = nullptr;
1094 
1095   for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
1096     BasicBlock *Pred = I.getIncomingBlock(i);
1097     // If the incoming block is dead, skip the incoming block.
1098     if (DeadBlocks.count(Pred))
1099       continue;
1100     // If the parent block of phi is not the known successor of the incoming
1101     // block, skip the incoming block.
1102     BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
1103     if (KnownSuccessor && KnownSuccessor != I.getParent())
1104       continue;
1105 
1106     Value *V = I.getIncomingValue(i);
1107     // If the incoming value is this phi itself, skip the incoming value.
1108     if (&I == V)
1109       continue;
1110 
1111     Constant *C = dyn_cast<Constant>(V);
1112     if (!C)
1113       C = SimplifiedValues.lookup(V);
1114 
1115     std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
1116     if (!C && CheckSROA)
1117       BaseAndOffset = ConstantOffsetPtrs.lookup(V);
1118 
1119     if (!C && !BaseAndOffset.first)
1120       // The incoming value is neither a constant nor a pointer with constant
1121       // offset, so exit early.
1122       return true;
1123 
1124     if (FirstC) {
1125       if (FirstC == C)
1126         // If we've seen a constant incoming value before and it is the same
1127         // constant we see this time, continue checking the next incoming value.
1128         continue;
1129       // Otherwise early exit because we either see a different constant or saw
1130       // a constant before but we have a pointer with constant offset this time.
1131       return true;
1132     }
1133 
1134     if (FirstV) {
1135       // The same logic as above, but check pointer with constant offset here.
1136       if (FirstBaseAndOffset == BaseAndOffset)
1137         continue;
1138       return true;
1139     }
1140 
1141     if (C) {
1142       // This is the 1st time we've seen a constant, record it.
1143       FirstC = C;
1144       continue;
1145     }
1146 
1147     // The remaining case is that this is the 1st time we've seen a pointer with
1148     // constant offset, record it.
1149     FirstV = V;
1150     FirstBaseAndOffset = BaseAndOffset;
1151   }
1152 
1153   // Check if we can map phi to a constant.
1154   if (FirstC) {
1155     SimplifiedValues[&I] = FirstC;
1156     return true;
1157   }
1158 
1159   // Check if we can map phi to a pointer with constant offset.
1160   if (FirstBaseAndOffset.first) {
1161     ConstantOffsetPtrs[&I] = FirstBaseAndOffset;
1162 
1163     if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
1164       SROAArgValues[&I] = SROAArg;
1165   }
1166 
1167   return true;
1168 }
1169 
1170 /// Check whether we can fold GEPs of constant-offset call site argument pointers.
1171 /// This requires target data and inbounds GEPs.
1172 ///
1173 /// \return true if the specified GEP can be folded.
1174 bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
1175   // Check if we have a base + offset for the pointer.
1176   std::pair<Value *, APInt> BaseAndOffset =
1177       ConstantOffsetPtrs.lookup(I.getPointerOperand());
1178   if (!BaseAndOffset.first)
1179     return false;
1180 
1181   // Check if the offset of this GEP is constant, and if so accumulate it
1182   // into Offset.
1183   if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
1184     return false;
1185 
1186   // Add the result as a new mapping to Base + Offset.
1187   ConstantOffsetPtrs[&I] = BaseAndOffset;
1188 
1189   return true;
1190 }
1191 
1192 bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
1193   auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());
1194 
1195   // Lambda to check whether a GEP's indices are all constant.
1196   auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
1197     for (const Use &Op : GEP.indices())
1198       if (!isa<Constant>(Op) && !SimplifiedValues.lookup(Op))
1199         return false;
1200     return true;
1201   };
1202 
1203   if (!DisableGEPConstOperand)
1204     if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1205         SmallVector<Constant *, 2> Indices;
1206         for (unsigned int Index = 1 ; Index < COps.size() ; ++Index)
1207             Indices.push_back(COps[Index]);
1208         return ConstantExpr::getGetElementPtr(I.getSourceElementType(), COps[0],
1209                                               Indices, I.isInBounds());
1210         }))
1211       return true;
1212 
1213   if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
1214     if (SROAArg)
1215       SROAArgValues[&I] = SROAArg;
1216 
1217     // Constant GEPs are modeled as free.
1218     return true;
1219   }
1220 
1221   // Variable GEPs will require math and will disable SROA.
1222   if (SROAArg)
1223     disableSROAForArg(SROAArg);
1224   return isGEPFree(I);
1225 }
1226 
1227 /// Simplify \p I if its operands are constants and update SimplifiedValues.
1228 /// \p Evaluate is a callable specific to instruction type that evaluates the
1229 /// instruction when all the operands are constants.
1230 template <typename Callable>
1231 bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
1232   SmallVector<Constant *, 2> COps;
1233   for (Value *Op : I.operands()) {
1234     Constant *COp = dyn_cast<Constant>(Op);
1235     if (!COp)
1236       COp = SimplifiedValues.lookup(Op);
1237     if (!COp)
1238       return false;
1239     COps.push_back(COp);
1240   }
1241   auto *C = Evaluate(COps);
1242   if (!C)
1243     return false;
1244   SimplifiedValues[&I] = C;
1245   return true;
1246 }
1247 
1248 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
1249   // Propagate constants through bitcasts.
1250   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1251         return ConstantExpr::getBitCast(COps[0], I.getType());
1252       }))
1253     return true;
1254 
1255   // Track base/offsets through casts
1256   std::pair<Value *, APInt> BaseAndOffset =
1257       ConstantOffsetPtrs.lookup(I.getOperand(0));
1258   // Casts don't change the offset, just wrap it up.
1259   if (BaseAndOffset.first)
1260     ConstantOffsetPtrs[&I] = BaseAndOffset;
1261 
1262   // Also look for SROA candidates here.
1263   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1264     SROAArgValues[&I] = SROAArg;
1265 
1266   // Bitcasts are always zero cost.
1267   return true;
1268 }
1269 
1270 bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
1271   // Propagate constants through ptrtoint.
1272   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1273         return ConstantExpr::getPtrToInt(COps[0], I.getType());
1274       }))
1275     return true;
1276 
1277   // Track base/offset pairs when converted to a plain integer provided the
1278   // integer is large enough to represent the pointer.
1279   unsigned IntegerSize = I.getType()->getScalarSizeInBits();
1280   unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
1281   if (IntegerSize == DL.getPointerSizeInBits(AS)) {
1282     std::pair<Value *, APInt> BaseAndOffset =
1283         ConstantOffsetPtrs.lookup(I.getOperand(0));
1284     if (BaseAndOffset.first)
1285       ConstantOffsetPtrs[&I] = BaseAndOffset;
1286   }
1287 
1288   // This is really weird. Technically, ptrtoint will disable SROA. However,
1289   // unless that ptrtoint is *used* somewhere in the live basic blocks after
1290   // inlining, it will be nuked, and SROA should proceed. All of the uses which
1291   // would block SROA would also block SROA if applied directly to a pointer,
1292   // and so we can just add the integer in here. The only places where SROA is
1293   // preserved either cannot fire on an integer, or won't in-and-of themselves
1294   // disable SROA (ext) w/o some later use that we would see and disable.
1295   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1296     SROAArgValues[&I] = SROAArg;
1297 
1298   return TargetTransformInfo::TCC_Free ==
1299          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1300 }
1301 
1302 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
1303   // Propagate constants through inttoptr.
1304   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1305         return ConstantExpr::getIntToPtr(COps[0], I.getType());
1306       }))
1307     return true;
1308 
1309   // Track base/offset pairs when round-tripped through a pointer without
1310   // modifications provided the integer is not too large.
1311   Value *Op = I.getOperand(0);
1312   unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
1313   if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
1314     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
1315     if (BaseAndOffset.first)
1316       ConstantOffsetPtrs[&I] = BaseAndOffset;
1317   }
1318 
1319   // "Propagate" SROA here in the same manner as we do for ptrtoint above.
1320   if (auto *SROAArg = getSROAArgForValueOrNull(Op))
1321     SROAArgValues[&I] = SROAArg;
1322 
1323   return TargetTransformInfo::TCC_Free ==
1324          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1325 }
1326 
1327 bool CallAnalyzer::visitCastInst(CastInst &I) {
1328   // Propagate constants through casts.
1329   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1330         return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
1331       }))
1332     return true;
1333 
1334   // Disable SROA in the face of arbitrary casts we don't explicitly list
1335   // elsewhere.
1336   disableSROA(I.getOperand(0));
1337 
1338   // If this is a floating-point cast, and the target says this operation
1339   // is expensive, this may eventually become a library call. Treat the cost
1340   // as such.
1341   switch (I.getOpcode()) {
1342   case Instruction::FPTrunc:
1343   case Instruction::FPExt:
1344   case Instruction::UIToFP:
1345   case Instruction::SIToFP:
1346   case Instruction::FPToUI:
1347   case Instruction::FPToSI:
1348     if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
1349       onCallPenalty();
1350     break;
1351   default:
1352     break;
1353   }
1354 
1355   return TargetTransformInfo::TCC_Free ==
1356          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1357 }
1358 
1359 bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
1360   Value *Operand = I.getOperand(0);
1361   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1362         return ConstantFoldInstOperands(&I, COps[0], DL);
1363       }))
1364     return true;
1365 
1366   // Disable any SROA on the argument to arbitrary unary instructions.
1367   disableSROA(Operand);
1368 
1369   return false;
1370 }
1371 
1372 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
1373   return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
1374 }
1375 
1376 bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
1377   // Does the *call site* have the NonNull attribute set on an argument?  We
1378   // use the attribute on the call site to memoize any analysis done in the
1379   // caller. This will also trip if the callee function has a non-null
1380   // parameter attribute, but that's a less interesting case because hopefully
1381   // the callee would already have been simplified based on that.
1382   if (Argument *A = dyn_cast<Argument>(V))
1383     if (paramHasAttr(A, Attribute::NonNull))
1384       return true;
1385 
1386   // Is this an alloca in the caller?  This is distinct from the attribute case
1387   // above because attributes aren't updated within the inliner itself and we
1388   // always want to catch the alloca derived case.
1389   if (isAllocaDerivedArg(V))
1390     // We can actually predict the result of comparisons between an
1391     // alloca-derived value and null. Note that this fires regardless of
1392     // SROA firing.
1393     return true;
1394 
1395   return false;
1396 }
1397 
1398 bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
1399   // If the normal destination of the invoke or the parent block of the call
1400   // site is unreachable-terminated, there is little point in inlining this
1401   // unless there is literally zero cost.
1402   // FIXME: Note that it is possible that an unreachable-terminated block has a
1403   // hot entry. For example, in below scenario inlining hot_call_X() may be
1404   // beneficial :
1405   // main() {
1406   //   hot_call_1();
1407   //   ...
1408   //   hot_call_N()
1409   //   exit(0);
1410   // }
1411   // For now, we are not handling this corner case here as it is rare in real
1412   // code. In the future, we should elaborate on this based on BPI and BFI in
1413   // more general threshold-adjusting heuristics in updateThreshold().
1414   if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
1415     if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
1416       return false;
1417   } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
1418     return false;
1419 
1420   return true;
1421 }
1422 
1423 bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
1424                                             BlockFrequencyInfo *CallerBFI) {
1425   // If global profile summary is available, then callsite's coldness is
1426   // determined based on that.
1427   if (PSI && PSI->hasProfileSummary())
1428     return PSI->isColdCallSite(Call, CallerBFI);
1429 
1430   // Otherwise we need BFI to be available.
1431   if (!CallerBFI)
1432     return false;
1433 
1434   // Determine if the callsite is cold relative to caller's entry. We could
1435   // potentially cache the computation of scaled entry frequency, but the added
1436   // complexity is not worth it unless this scaling shows up high in the
1437   // profiles.
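       // Sketch of the check below: ColdCallSiteRelFreq is a percentage, so with
       // a value of 2 the callsite is treated as cold only if its block frequency
       // is below 2% of the caller's entry frequency.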
1438   const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
1439   auto CallSiteBB = Call.getParent();
1440   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
1441   auto CallerEntryFreq =
1442       CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
1443   return CallSiteFreq < CallerEntryFreq * ColdProb;
1444 }
1445 
1446 Optional<int>
1447 InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
1448                                                 BlockFrequencyInfo *CallerBFI) {
1449 
1450   // If global profile summary is available, then callsite's hotness is
1451   // determined based on that.
1452   if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
1453     return Params.HotCallSiteThreshold;
1454 
1455   // Otherwise we need BFI to be available and to have a locally hot callsite
1456   // threshold.
1457   if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
1458     return None;
1459 
1460   // Determine if the callsite is hot relative to caller's entry. We could
1461   // potentially cache the computation of scaled entry frequency, but the added
1462   // complexity is not worth it unless this scaling shows up high in the
1463   // profiles.
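       // Sketch of the check below: with HotCallSiteRelFreq == 60, the callsite
       // must execute at least 60x as often as the caller's entry block before
       // the locally hot threshold is used.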
1464   auto CallSiteBB = Call.getParent();
1465   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
1466   auto CallerEntryFreq = CallerBFI->getEntryFreq();
1467   if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
1468     return Params.LocallyHotCallSiteThreshold;
1469 
1470   // Otherwise treat it normally.
1471   return None;
1472 }
1473 
1474 void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
1475   // If no size growth is allowed for this inlining, set Threshold to 0.
1476   if (!allowSizeGrowth(Call)) {
1477     Threshold = 0;
1478     return;
1479   }
1480 
1481   Function *Caller = Call.getCaller();
1482 
1483   // return min(A, B) if B is valid.
1484   auto MinIfValid = [](int A, Optional<int> B) {
1485     return B ? std::min(A, B.getValue()) : A;
1486   };
1487 
1488   // return max(A, B) if B is valid.
1489   auto MaxIfValid = [](int A, Optional<int> B) {
1490     return B ? std::max(A, B.getValue()) : A;
1491   };
1492 
1493   // Various bonus percentages. These are multiplied by Threshold to get the
1494   // bonus values.
1495   // SingleBBBonus: This bonus is applied if the callee has a single reachable
1496   // basic block at the given callsite context. This is speculatively applied
1497   // and withdrawn if more than one basic block is seen.
1498   //
1499   // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
1500   // of the last call to a static function as inlining such functions is
1501   // guaranteed to reduce code size.
1502   //
1503   // These bonus percentages may be set to 0 based on properties of the caller
1504   // and the callsite.
1505   int SingleBBBonusPercent = 50;
1506   int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
1507   int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;
1508 
1509   // Lambda to set all the above bonus and bonus percentages to 0.
1510   auto DisallowAllBonuses = [&]() {
1511     SingleBBBonusPercent = 0;
1512     VectorBonusPercent = 0;
1513     LastCallToStaticBonus = 0;
1514   };
1515 
1516   // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
1517   // and reduce the threshold if the caller has the necessary attribute.
1518   if (Caller->hasMinSize()) {
1519     Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
1520     // For minsize, we want to disable the single BB bonus and the vector
1521     // bonuses, but not the last-call-to-static bonus. Inlining the last call to
1522     // a static function will, at the minimum, eliminate the parameter setup and
1523     // call/return instructions.
1524     SingleBBBonusPercent = 0;
1525     VectorBonusPercent = 0;
1526   } else if (Caller->hasOptSize())
1527     Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
1528 
1529   // Adjust the threshold based on inlinehint attribute and profile based
1530   // hotness information if the caller does not have MinSize attribute.
1531   if (!Caller->hasMinSize()) {
1532     if (Callee.hasFnAttribute(Attribute::InlineHint))
1533       Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1534 
1535     // FIXME: After switching to the new passmanager, simplify the logic below
1536     // by checking only the callsite hotness/coldness as we will reliably
1537     // have local profile information.
1538     //
1539     // Callsite hotness and coldness can be determined if sample profile is
1540     // used (which adds hotness metadata to calls) or if caller's
1541     // BlockFrequencyInfo is available.
1542     BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
1543     auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
1544     if (!Caller->hasOptSize() && HotCallSiteThreshold) {
1545       LLVM_DEBUG(dbgs() << "Hot callsite.\n");
1546       // FIXME: This should update the threshold only if it exceeds the
1547       // current threshold, but AutoFDO + ThinLTO currently relies on this
1548       // behavior to prevent inlining of hot callsites during ThinLTO
1549       // compile phase.
1550       Threshold = HotCallSiteThreshold.getValue();
1551     } else if (isColdCallSite(Call, CallerBFI)) {
1552       LLVM_DEBUG(dbgs() << "Cold callsite.\n");
1553       // Do not apply bonuses for a cold callsite including the
1554       // LastCallToStatic bonus. While this bonus might result in code size
1555       // reduction, it can cause the size of a non-cold caller to increase
1556       // preventing it from being inlined.
1557       DisallowAllBonuses();
1558       Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
1559     } else if (PSI) {
1560       // Use callee's global profile information only if we have no way of
1561       // determining this via callsite information.
1562       if (PSI->isFunctionEntryHot(&Callee)) {
1563         LLVM_DEBUG(dbgs() << "Hot callee.\n");
1564         // If callsite hotness can not be determined, we may still know
1565         // that the callee is hot and treat it as a weaker hint for threshold
1566         // increase.
1567         Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1568       } else if (PSI->isFunctionEntryCold(&Callee)) {
1569         LLVM_DEBUG(dbgs() << "Cold callee.\n");
1570         // Do not apply bonuses for a cold callee including the
1571         // LastCallToStatic bonus. While this bonus might result in code size
1572         // reduction, it can cause the size of a non-cold caller to increase
1573         // preventing it from being inlined.
1574         DisallowAllBonuses();
1575         Threshold = MinIfValid(Threshold, Params.ColdThreshold);
1576       }
1577     }
1578   }
1579 
1580   // Finally, take the target-specific inlining threshold multiplier into
1581   // account.
1582   Threshold *= TTI.getInliningThresholdMultiplier();
1583   Threshold += TTI.adjustInliningThreshold(&Call);
1584 
1585   SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
1586   VectorBonus = Threshold * VectorBonusPercent / 100;
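       // Rough worked example (assuming a threshold of 225, no target multiplier
       // or adjustment, and the default percentages above): SingleBBBonus becomes
       // 225 * 50 / 100 = 112, and a 150% vector bonus would give
       // 225 * 150 / 100 = 337.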
1587 
1588   bool OnlyOneCallAndLocalLinkage =
1589       F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
1590   // If there is only one call of the function, and it has internal linkage,
1591   // the cost of inlining it drops dramatically. It may seem odd to update
1592   // Cost in updateThreshold, but the bonus depends on the logic in this method.
1593   if (OnlyOneCallAndLocalLinkage)
1594     Cost -= LastCallToStaticBonus;
1595 }
1596 
1597 bool CallAnalyzer::visitCmpInst(CmpInst &I) {
1598   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1599   // First try to handle simplified comparisons.
1600   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1601         return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
1602       }))
1603     return true;
1604 
1605   if (I.getOpcode() == Instruction::FCmp)
1606     return false;
1607 
1608   // Otherwise look for a comparison between constant offset pointers with
1609   // a common base.
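       // Hypothetical IR example, with %a an alloca forwarded from the caller:
       //   %p = getelementptr inbounds i32, i32* %a, i64 1
       //   %q = getelementptr inbounds i32, i32* %a, i64 2
       //   %c = icmp ult i32* %p, %q
       // Both sides share base %a with offsets 4 and 8, so %c folds to true.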
1610   Value *LHSBase, *RHSBase;
1611   APInt LHSOffset, RHSOffset;
1612   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1613   if (LHSBase) {
1614     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1615     if (RHSBase && LHSBase == RHSBase) {
1616       // We have common bases, fold the icmp to a constant based on the
1617       // offsets.
1618       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1619       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1620       if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
1621         SimplifiedValues[&I] = C;
1622         ++NumConstantPtrCmps;
1623         return true;
1624       }
1625     }
1626   }
1627 
1628   // If the comparison is an equality comparison with null, we can simplify it
1629   // if we know the value (argument) can't be null
1630   if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
1631       isKnownNonNullInCallee(I.getOperand(0))) {
1632     bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
1633     SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
1634                                       : ConstantInt::getFalse(I.getType());
1635     return true;
1636   }
1637   return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
1638 }
1639 
1640 bool CallAnalyzer::visitSub(BinaryOperator &I) {
1641   // Try to handle a special case: we can fold computing the difference of two
1642   // constant-related pointers.
1643   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1644   Value *LHSBase, *RHSBase;
1645   APInt LHSOffset, RHSOffset;
1646   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1647   if (LHSBase) {
1648     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1649     if (RHSBase && LHSBase == RHSBase) {
1650       // We have common bases, fold the subtract to a constant based on the
1651       // offsets.
1652       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1653       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1654       if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
1655         SimplifiedValues[&I] = C;
1656         ++NumConstantPtrDiffs;
1657         return true;
1658       }
1659     }
1660   }
1661 
1662   // Otherwise, fall back to the generic logic for simplifying and handling
1663   // instructions.
1664   return Base::visitSub(I);
1665 }
1666 
1667 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
1668   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1669   Constant *CLHS = dyn_cast<Constant>(LHS);
1670   if (!CLHS)
1671     CLHS = SimplifiedValues.lookup(LHS);
1672   Constant *CRHS = dyn_cast<Constant>(RHS);
1673   if (!CRHS)
1674     CRHS = SimplifiedValues.lookup(RHS);
1675 
1676   Value *SimpleV = nullptr;
1677   if (auto FI = dyn_cast<FPMathOperator>(&I))
1678     SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
1679                             FI->getFastMathFlags(), DL);
1680   else
1681     SimpleV =
1682         SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
1683 
1684   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1685     SimplifiedValues[&I] = C;
1686 
1687   if (SimpleV)
1688     return true;
1689 
1690   // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
1691   disableSROA(LHS);
1692   disableSROA(RHS);
1693 
1694   // If the instruction is floating point, and the target says this operation
1695   // is expensive, this may eventually become a library call. Treat the cost
1696   // as such, unless it's fneg, which can be implemented with an xor.
1697   using namespace llvm::PatternMatch;
1698   if (I.getType()->isFloatingPointTy() &&
1699       TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
1700       !match(&I, m_FNeg(m_Value())))
1701     onCallPenalty();
1702 
1703   return false;
1704 }
1705 
1706 bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
1707   Value *Op = I.getOperand(0);
1708   Constant *COp = dyn_cast<Constant>(Op);
1709   if (!COp)
1710     COp = SimplifiedValues.lookup(Op);
1711 
1712   Value *SimpleV = SimplifyFNegInst(
1713       COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
1714 
1715   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1716     SimplifiedValues[&I] = C;
1717 
1718   if (SimpleV)
1719     return true;
1720 
1721   // Disable any SROA on arguments to arbitrary, unsimplified fneg.
1722   disableSROA(Op);
1723 
1724   return false;
1725 }
1726 
1727 bool CallAnalyzer::visitLoad(LoadInst &I) {
1728   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1729     return true;
1730 
1731   // If the data is already loaded from this address and hasn't been clobbered
1732   // by any stores or calls, this load is likely to be redundant and can be
1733   // eliminated.
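       // For example, two simple loads of the same pointer with no intervening
       // store or call:
       //   %x = load i32, i32* %p
       //   %y = load i32, i32* %p
       // The second load is credited as a load-elimination saving below.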
1734   if (EnableLoadElimination &&
1735       !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
1736     onLoadEliminationOpportunity();
1737     return true;
1738   }
1739 
1740   return false;
1741 }
1742 
1743 bool CallAnalyzer::visitStore(StoreInst &I) {
1744   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1745     return true;
1746 
1747   // The store can potentially clobber loads and prevent repeated loads from
1748   // being eliminated.
1749   // FIXME:
1750   // 1. We can probably keep an initial set of eliminable loads subtracted
1751   // from the cost even when we finally see a store. We just need to disable
1752   // *further* accumulation of elimination savings.
1753   // 2. We should probably at some point thread MemorySSA for the callee into
1754   // this and then use that to actually compute *really* precise savings.
1755   disableLoadElimination();
1756   return false;
1757 }
1758 
1759 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
1760   // Constant folding for extract value is trivial.
1761   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1762         return ConstantExpr::getExtractValue(COps[0], I.getIndices());
1763       }))
1764     return true;
1765 
1766   // SROA can look through these but give them a cost.
1767   return false;
1768 }
1769 
1770 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
1771   // Constant folding for insert value is trivial.
1772   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1773         return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
1774                                             /*InsertedValueOperand*/ COps[1],
1775                                             I.getIndices());
1776       }))
1777     return true;
1778 
1779   // SROA can look through these but give them a cost.
1780   return false;
1781 }
1782 
1783 /// Try to simplify a call site.
1784 ///
1785 /// Takes a concrete function and callsite and tries to actually simplify it by
1786 /// analyzing the arguments and call itself with instsimplify. Returns true if
1787 /// it has simplified the callsite to some other entity (a constant), making it
1788 /// free.
1789 bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
1790   // FIXME: Using the instsimplify logic directly for this is inefficient
1791   // because we have to continually rebuild the argument list even when no
1792   // simplifications can be performed. Until that is fixed with remapping
1793   // inside of instsimplify, directly constant fold calls here.
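       // For example, a call such as
       //   %r = call double @llvm.sqrt.f64(double 4.000000e+00)
       // can typically be folded to the constant 2.0 once its argument is known,
       // making the call site free.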
1794   if (!canConstantFoldCallTo(&Call, F))
1795     return false;
1796 
1797   // Try to re-map the arguments to constants.
1798   SmallVector<Constant *, 4> ConstantArgs;
1799   ConstantArgs.reserve(Call.arg_size());
1800   for (Value *I : Call.args()) {
1801     Constant *C = dyn_cast<Constant>(I);
1802     if (!C)
1803       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
1804     if (!C)
1805       return false; // This argument doesn't map to a constant.
1806 
1807     ConstantArgs.push_back(C);
1808   }
1809   if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
1810     SimplifiedValues[&Call] = C;
1811     return true;
1812   }
1813 
1814   return false;
1815 }
1816 
1817 bool CallAnalyzer::visitCallBase(CallBase &Call) {
1818   if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
1819       !F.hasFnAttribute(Attribute::ReturnsTwice)) {
1820     // This aborts the entire analysis.
1821     ExposesReturnsTwice = true;
1822     return false;
1823   }
1824   if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
1825     ContainsNoDuplicateCall = true;
1826 
1827   Value *Callee = Call.getCalledOperand();
1828   Function *F = dyn_cast_or_null<Function>(Callee);
1829   bool IsIndirectCall = !F;
1830   if (IsIndirectCall) {
1831     // Check if this happens to be an indirect function call to a known function
1832     // in this inline context. If not, we've done all we can.
1833     F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
1834     if (!F) {
1835       onCallArgumentSetup(Call);
1836 
1837       if (!Call.onlyReadsMemory())
1838         disableLoadElimination();
1839       return Base::visitCallBase(Call);
1840     }
1841   }
1842 
1843   assert(F && "Expected a call to a known function");
1844 
1845   // When we have a concrete function, first try to simplify it directly.
1846   if (simplifyCallSite(F, Call))
1847     return true;
1848 
1849   // Next check if it is an intrinsic we know about.
1850   // FIXME: Lift this into part of the InstVisitor.
1851   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
1852     switch (II->getIntrinsicID()) {
1853     default:
1854       if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
1855         disableLoadElimination();
1856       return Base::visitCallBase(Call);
1857 
1858     case Intrinsic::load_relative:
1859       onLoadRelativeIntrinsic();
1860       return false;
1861 
1862     case Intrinsic::memset:
1863     case Intrinsic::memcpy:
1864     case Intrinsic::memmove:
1865       disableLoadElimination();
1866       // SROA can usually chew through these intrinsics, but they aren't free.
1867       return false;
1868     case Intrinsic::icall_branch_funnel:
1869     case Intrinsic::localescape:
1870       HasUninlineableIntrinsic = true;
1871       return false;
1872     case Intrinsic::vastart:
1873       InitsVargArgs = true;
1874       return false;
1875     }
1876   }
1877 
1878   if (F == Call.getFunction()) {
1879     // This flag will fully abort the analysis, so don't bother with anything
1880     // else.
1881     IsRecursiveCall = true;
1882     return false;
1883   }
1884 
1885   if (TTI.isLoweredToCall(F)) {
1886     onLoweredCall(F, Call, IsIndirectCall);
1887   }
1888 
1889   if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
1890     disableLoadElimination();
1891   return Base::visitCallBase(Call);
1892 }
1893 
1894 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
1895   // At least one return instruction will be free after inlining.
1896   bool Free = !HasReturn;
1897   HasReturn = true;
1898   return Free;
1899 }
1900 
1901 bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
1902   // We model unconditional branches as essentially free -- they really
1903   // shouldn't exist at all, but handling them makes the behavior of the
1904   // inliner more regular and predictable. Interestingly, conditional branches
1905   // which will fold away are also free.
1906   return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
1907          dyn_cast_or_null<ConstantInt>(
1908              SimplifiedValues.lookup(BI.getCondition()));
1909 }
1910 
1911 bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
1912   bool CheckSROA = SI.getType()->isPointerTy();
1913   Value *TrueVal = SI.getTrueValue();
1914   Value *FalseVal = SI.getFalseValue();
1915 
1916   Constant *TrueC = dyn_cast<Constant>(TrueVal);
1917   if (!TrueC)
1918     TrueC = SimplifiedValues.lookup(TrueVal);
1919   Constant *FalseC = dyn_cast<Constant>(FalseVal);
1920   if (!FalseC)
1921     FalseC = SimplifiedValues.lookup(FalseVal);
1922   Constant *CondC =
1923       dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));
1924 
1925   if (!CondC) {
1926     // Select C, X, X => X
1927     if (TrueC == FalseC && TrueC) {
1928       SimplifiedValues[&SI] = TrueC;
1929       return true;
1930     }
1931 
1932     if (!CheckSROA)
1933       return Base::visitSelectInst(SI);
1934 
1935     std::pair<Value *, APInt> TrueBaseAndOffset =
1936         ConstantOffsetPtrs.lookup(TrueVal);
1937     std::pair<Value *, APInt> FalseBaseAndOffset =
1938         ConstantOffsetPtrs.lookup(FalseVal);
1939     if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
1940       ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;
1941 
1942       if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
1943         SROAArgValues[&SI] = SROAArg;
1944       return true;
1945     }
1946 
1947     return Base::visitSelectInst(SI);
1948   }
1949 
1950   // Select condition is a constant.
1951   Value *SelectedV = CondC->isAllOnesValue()
1952                          ? TrueVal
1953                          : (CondC->isNullValue()) ? FalseVal : nullptr;
1954   if (!SelectedV) {
1955     // Condition is a vector constant that is not all 1s or all 0s.  If all
1956     // operands are constants, ConstantExpr::getSelect() can handle cases
1957     // such as vector selects.
1958     if (TrueC && FalseC) {
1959       if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
1960         SimplifiedValues[&SI] = C;
1961         return true;
1962       }
1963     }
1964     return Base::visitSelectInst(SI);
1965   }
1966 
1967   // Condition is either all 1s or all 0s. SI can be simplified.
1968   if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
1969     SimplifiedValues[&SI] = SelectedC;
1970     return true;
1971   }
1972 
1973   if (!CheckSROA)
1974     return true;
1975 
1976   std::pair<Value *, APInt> BaseAndOffset =
1977       ConstantOffsetPtrs.lookup(SelectedV);
1978   if (BaseAndOffset.first) {
1979     ConstantOffsetPtrs[&SI] = BaseAndOffset;
1980 
1981     if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
1982       SROAArgValues[&SI] = SROAArg;
1983   }
1984 
1985   return true;
1986 }
1987 
1988 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
1989   // We model unconditional switches as free, see the comments on handling
1990   // branches.
1991   if (isa<ConstantInt>(SI.getCondition()))
1992     return true;
1993   if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
1994     if (isa<ConstantInt>(V))
1995       return true;
1996 
1997   // Assume the most general case where the switch is lowered into
1998   // either a jump table, bit test, or a balanced binary tree consisting of
1999   // case clusters without merging adjacent clusters with the same
2000   // destination. We do not consider the switches that are lowered with a mix
2001   // of jump table/bit test/binary search tree. The cost of the switch is
2002   // proportional to the size of the tree or the size of jump table range.
2003   //
2004   // NB: We convert large switches which are just used to initialize large phi
2005   // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
2006   // inlining those. It will prevent inlining in cases where the optimization
2007   // does not (yet) fire.
2008 
2009   unsigned JumpTableSize = 0;
2010   BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
2011   unsigned NumCaseCluster =
2012       TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
2013 
2014   onFinalizeSwitch(JumpTableSize, NumCaseCluster);
2015   return false;
2016 }
2017 
2018 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
2019   // We never want to inline functions that contain an indirectbr.  Inlining
2020   // would be incorrect because all the blockaddresses (in static global
2021   // initializers, for example) would be referring to the original function, and
2022   // this indirect jump would jump from the inlined copy of the function into
2023   // the original function, which is extremely undefined behavior.
2024   // FIXME: This logic isn't really right; we can safely inline functions with
2025   // indirectbr's as long as no other function or global references the
2026   // blockaddress of a block within the current function.
2027   HasIndirectBr = true;
2028   return false;
2029 }
2030 
2031 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
2032   // FIXME: It's not clear that a single instruction is an accurate model for
2033   // the inline cost of a resume instruction.
2034   return false;
2035 }
2036 
2037 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
2038   // FIXME: It's not clear that a single instruction is an accurate model for
2039   // the inline cost of a cleanupret instruction.
2040   return false;
2041 }
2042 
2043 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
2044   // FIXME: It's not clear that a single instruction is an accurate model for
2045   // the inline cost of a catchret instruction.
2046   return false;
2047 }
2048 
2049 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
2050   // FIXME: It might be reasonable to discount the cost of instructions leading
2051   // to unreachable as they have the lowest possible impact on both runtime and
2052   // code size.
2053   return true; // No actual code is needed for unreachable.
2054 }
2055 
2056 bool CallAnalyzer::visitInstruction(Instruction &I) {
2057   // Some instructions are free. All of the free intrinsics can also be
2058   // handled by SROA, etc.
2059   if (TargetTransformInfo::TCC_Free ==
2060       TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
2061     return true;
2062 
2063   // We found something we don't understand or can't handle. Mark any SROA-able
2064   // values in the operand list as no longer viable.
2065   for (const Use &Op : I.operands())
2066     disableSROA(Op);
2067 
2068   return false;
2069 }
2070 
2071 /// Analyze a basic block for its contribution to the inline cost.
2072 ///
2073 /// This method walks the analyzer over every instruction in the given basic
2074 /// block and accounts for their cost during inlining at this callsite. It
2075 /// aborts early if the threshold has been exceeded or an impossible to inline
2076 /// construct has been detected. It returns false if inlining is no longer
2077 /// viable, and true if inlining remains viable.
2078 InlineResult
2079 CallAnalyzer::analyzeBlock(BasicBlock *BB,
2080                            SmallPtrSetImpl<const Value *> &EphValues) {
2081   for (Instruction &I : *BB) {
2082     // FIXME: Currently, the number of instructions in a function, regardless of
2083     // our ability to simplify them during inlining to constants or dead code,
2084     // is actually used by the vector bonus heuristic. As long as that's true,
2085     // we have to special-case debug intrinsics here to prevent differences in
2086     // inlining due to debug symbols. Eventually, the number of unsimplified
2087     // instructions shouldn't factor into the cost computation, but until then,
2088     // hack around it here.
2089     if (isa<DbgInfoIntrinsic>(I))
2090       continue;
2091 
2092     // Skip pseudo-probes.
2093     if (isa<PseudoProbeInst>(I))
2094       continue;
2095 
2096     // Skip ephemeral values.
2097     if (EphValues.count(&I))
2098       continue;
2099 
2100     ++NumInstructions;
2101     if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
2102       ++NumVectorInstructions;
2103 
2104     // If the instruction simplified to a constant, there is no cost to this
2105     // instruction. Visit the instructions using our InstVisitor to account for
2106     // all of the per-instruction logic. The visit tree returns true if we
2107     // consumed the instruction in any way, and false if the instruction's base
2108     // cost should count against inlining.
2109     onInstructionAnalysisStart(&I);
2110 
2111     if (Base::visit(&I))
2112       ++NumInstructionsSimplified;
2113     else
2114       onMissedSimplification();
2115 
2116     onInstructionAnalysisFinish(&I);
2117     using namespace ore;
2118     // If visiting this instruction detected an uninlinable pattern, abort.
2119     InlineResult IR = InlineResult::success();
2120     if (IsRecursiveCall)
2121       IR = InlineResult::failure("recursive");
2122     else if (ExposesReturnsTwice)
2123       IR = InlineResult::failure("exposes returns twice");
2124     else if (HasDynamicAlloca)
2125       IR = InlineResult::failure("dynamic alloca");
2126     else if (HasIndirectBr)
2127       IR = InlineResult::failure("indirect branch");
2128     else if (HasUninlineableIntrinsic)
2129       IR = InlineResult::failure("uninlinable intrinsic");
2130     else if (InitsVargArgs)
2131       IR = InlineResult::failure("varargs");
2132     if (!IR.isSuccess()) {
2133       if (ORE)
2134         ORE->emit([&]() {
2135           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2136                                           &CandidateCall)
2137                  << NV("Callee", &F) << " has uninlinable pattern ("
2138                  << NV("InlineResult", IR.getFailureReason())
2139                  << ") and cost is not fully computed";
2140         });
2141       return IR;
2142     }
2143 
2144     // If the caller is a recursive function then we don't want to inline
2145     // functions which allocate a lot of stack space because it would increase
2146     // the caller stack usage dramatically.
2147     if (IsCallerRecursive &&
2148         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
2149       auto IR =
2150           InlineResult::failure("recursive and allocates too much stack space");
2151       if (ORE)
2152         ORE->emit([&]() {
2153           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2154                                           &CandidateCall)
2155                  << NV("Callee", &F) << " is "
2156                  << NV("InlineResult", IR.getFailureReason())
2157                  << ". Cost is not fully computed";
2158         });
2159       return IR;
2160     }
2161 
2162     if (shouldStop())
2163       return InlineResult::failure(
2164           "Call site analysis is not favorable to inlining.");
2165   }
2166 
2167   return InlineResult::success();
2168 }
2169 
2170 /// Compute the base pointer and cumulative constant offsets for V.
2171 ///
2172 /// This strips all constant offsets off of V, leaving it the base pointer, and
2173 /// accumulates the total constant offset applied in the returned constant. It
2174 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
2175 /// no constant offsets applied.
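     ///
     /// For example (hypothetical IR), given
     ///   %p = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 2
     ///   %q = bitcast i32* %p to i8*
     /// stripping %q walks back to %a and returns the accumulated offset 8.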
2176 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
2177   if (!V->getType()->isPointerTy())
2178     return nullptr;
2179 
2180   unsigned AS = V->getType()->getPointerAddressSpace();
2181   unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
2182   APInt Offset = APInt::getNullValue(IntPtrWidth);
2183 
2184   // Even though we don't look through PHI nodes, we could be called on an
2185   // instruction in an unreachable block, which may be on a cycle.
2186   SmallPtrSet<Value *, 4> Visited;
2187   Visited.insert(V);
2188   do {
2189     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2190       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
2191         return nullptr;
2192       V = GEP->getPointerOperand();
2193     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
2194       V = cast<Operator>(V)->getOperand(0);
2195     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2196       if (GA->isInterposable())
2197         break;
2198       V = GA->getAliasee();
2199     } else {
2200       break;
2201     }
2202     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
2203   } while (Visited.insert(V).second);
2204 
2205   Type *IdxPtrTy = DL.getIndexType(V->getType());
2206   return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
2207 }
2208 
2209 /// Find dead blocks due to deleted CFG edges during inlining.
2210 ///
2211 /// If we know the successor of the current block, \p CurrBB, has to be \p
2212 /// NextBB, the other successors of \p CurrBB are dead if these successors have
2213 /// no live incoming CFG edges.  If one block is found to be dead, we can
2214 /// continue growing the dead block list by checking the successors of the dead
2215 /// blocks to see if all their incoming edges are dead or not.
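     ///
     /// Sketch: if a conditional branch in \p CurrBB folded so that \p NextBB is
     /// its only live successor, and the other successor has no remaining live
     /// predecessors, that successor (and any block reachable only through it) is
     /// recorded in DeadBlocks so later heuristics can treat it as dead for this
     /// call site.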
2216 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
2217   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
2218     // A CFG edge is dead if the predecessor is dead or the predecessor has a
2219     // known successor which is not the one under exam.
2220     return (DeadBlocks.count(Pred) ||
2221             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
2222   };
2223 
2224   auto IsNewlyDead = [&](BasicBlock *BB) {
2225     // If all the edges to a block are dead, the block is also dead.
2226     return (!DeadBlocks.count(BB) &&
2227             llvm::all_of(predecessors(BB),
2228                          [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
2229   };
2230 
2231   for (BasicBlock *Succ : successors(CurrBB)) {
2232     if (Succ == NextBB || !IsNewlyDead(Succ))
2233       continue;
2234     SmallVector<BasicBlock *, 4> NewDead;
2235     NewDead.push_back(Succ);
2236     while (!NewDead.empty()) {
2237       BasicBlock *Dead = NewDead.pop_back_val();
2238       if (DeadBlocks.insert(Dead))
2239         // Continue growing the dead block lists.
2240         for (BasicBlock *S : successors(Dead))
2241           if (IsNewlyDead(S))
2242             NewDead.push_back(S);
2243     }
2244   }
2245 }
2246 
2247 /// Analyze a call site for potential inlining.
2248 ///
2249 /// Returns true if inlining this call is viable, and false if it is not
2250 /// viable. It computes the cost and adjusts the threshold based on numerous
2251 /// factors and heuristics. If this method returns false but the computed cost
2252 /// is below the computed threshold, then inlining was forcibly disabled by
2253 /// some artifact of the routine.
2254 InlineResult CallAnalyzer::analyze() {
2255   ++NumCallsAnalyzed;
2256 
2257   auto Result = onAnalysisStart();
2258   if (!Result.isSuccess())
2259     return Result;
2260 
2261   if (F.empty())
2262     return InlineResult::success();
2263 
2264   Function *Caller = CandidateCall.getFunction();
2265   // Check if the caller function is recursive itself.
2266   for (User *U : Caller->users()) {
2267     CallBase *Call = dyn_cast<CallBase>(U);
2268     if (Call && Call->getFunction() == Caller) {
2269       IsCallerRecursive = true;
2270       break;
2271     }
2272   }
2273 
2274   // Populate our simplified values by mapping from function arguments to call
2275   // arguments with known important simplifications.
2276   auto CAI = CandidateCall.arg_begin();
2277   for (Argument &FAI : F.args()) {
2278     assert(CAI != CandidateCall.arg_end());
2279     if (Constant *C = dyn_cast<Constant>(CAI))
2280       SimplifiedValues[&FAI] = C;
2281 
2282     Value *PtrArg = *CAI;
2283     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
2284       ConstantOffsetPtrs[&FAI] = std::make_pair(PtrArg, C->getValue());
2285 
2286       // We can SROA any pointer arguments derived from alloca instructions.
2287       if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
2288         SROAArgValues[&FAI] = SROAArg;
2289         onInitializeSROAArg(SROAArg);
2290         EnabledSROAAllocas.insert(SROAArg);
2291       }
2292     }
2293     ++CAI;
2294   }
2295   NumConstantArgs = SimplifiedValues.size();
2296   NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
2297   NumAllocaArgs = SROAArgValues.size();
2298 
2299   // FIXME: If a caller has multiple calls to a callee, we end up recomputing
2300   // the ephemeral values multiple times (and they're completely determined by
2301   // the callee, so this is purely duplicate work).
2302   SmallPtrSet<const Value *, 32> EphValues;
2303   CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
2304 
2305   // The worklist of live basic blocks in the callee *after* inlining. We avoid
2306   // adding basic blocks of the callee which can be proven to be dead for this
2307   // particular call site in order to get more accurate cost estimates. This
2308   // requires a somewhat heavyweight iteration pattern: we need to walk the
2309   // basic blocks in a breadth-first order as we insert live successors. To
2310   // accomplish this, while prioritizing small iteration counts because we exit
2311   // after crossing our threshold, we use a small-size optimized SetVector.
2312   typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
2313                     SmallPtrSet<BasicBlock *, 16>>
2314       BBSetVector;
2315   BBSetVector BBWorklist;
2316   BBWorklist.insert(&F.getEntryBlock());
2317 
2318   // Note that we *must not* cache the size, this loop grows the worklist.
2319   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
2320     if (shouldStop())
2321       break;
2322 
2323     BasicBlock *BB = BBWorklist[Idx];
2324     if (BB->empty())
2325       continue;
2326 
2327     onBlockStart(BB);
2328 
2329     // Disallow inlining a blockaddress with uses other than strictly callbr.
2330     // A blockaddress only has defined behavior for an indirect branch in the
2331     // same function, and we do not currently support inlining indirect
2332     // branches.  But, the inliner may not see an indirect branch that ends up
2333     // being dead code at a particular call site. If the blockaddress escapes
2334     // the function, e.g., via a global variable, inlining may lead to an
2335     // invalid cross-function reference.
2336     // FIXME: pr/39560: continue relaxing this overt restriction.
2337     if (BB->hasAddressTaken())
2338       for (User *U : BlockAddress::get(&*BB)->users())
2339         if (!isa<CallBrInst>(*U))
2340           return InlineResult::failure("blockaddress used outside of callbr");
2341 
2342     // Analyze the cost of this block. If we blow through the threshold, this
2343     // returns false, and we can bail out.
2344     InlineResult IR = analyzeBlock(BB, EphValues);
2345     if (!IR.isSuccess())
2346       return IR;
2347 
2348     Instruction *TI = BB->getTerminator();
2349 
2350     // Add in the live successors by first checking whether we have terminator
2351     // that may be simplified based on the values simplified by this call.
2352     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2353       if (BI->isConditional()) {
2354         Value *Cond = BI->getCondition();
2355         if (ConstantInt *SimpleCond =
2356                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2357           BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
2358           BBWorklist.insert(NextBB);
2359           KnownSuccessors[BB] = NextBB;
2360           findDeadBlocks(BB, NextBB);
2361           continue;
2362         }
2363       }
2364     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2365       Value *Cond = SI->getCondition();
2366       if (ConstantInt *SimpleCond =
2367               dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2368         BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
2369         BBWorklist.insert(NextBB);
2370         KnownSuccessors[BB] = NextBB;
2371         findDeadBlocks(BB, NextBB);
2372         continue;
2373       }
2374     }
2375 
2376     // If we're unable to select a particular successor, just count all of
2377     // them.
2378     for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
2379          ++TIdx)
2380       BBWorklist.insert(TI->getSuccessor(TIdx));
2381 
2382     onBlockAnalyzed(BB);
2383   }
2384 
2385   bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
2386                                     &F == CandidateCall.getCalledFunction();
2387   // If this is a noduplicate call, we can still inline as long as
2388   // inlining this would cause the removal of the caller (so the instruction
2389   // is not actually duplicated, just moved).
2390   if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
2391     return InlineResult::failure("noduplicate");
2392 
2393   return finalizeAnalysis();
2394 }
2395 
2396 void InlineCostCallAnalyzer::print() {
2397 #define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
2398   if (PrintInstructionComments)
2399     F.print(dbgs(), &Writer);
2400   DEBUG_PRINT_STAT(NumConstantArgs);
2401   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
2402   DEBUG_PRINT_STAT(NumAllocaArgs);
2403   DEBUG_PRINT_STAT(NumConstantPtrCmps);
2404   DEBUG_PRINT_STAT(NumConstantPtrDiffs);
2405   DEBUG_PRINT_STAT(NumInstructionsSimplified);
2406   DEBUG_PRINT_STAT(NumInstructions);
2407   DEBUG_PRINT_STAT(SROACostSavings);
2408   DEBUG_PRINT_STAT(SROACostSavingsLost);
2409   DEBUG_PRINT_STAT(LoadEliminationCost);
2410   DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
2411   DEBUG_PRINT_STAT(Cost);
2412   DEBUG_PRINT_STAT(Threshold);
2413 #undef DEBUG_PRINT_STAT
2414 }
2415 
2416 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2417 /// Dump stats about this call's analysis.
2418 LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
2419   print();
2420 }
2421 #endif
2422 
2423 /// Test that there are no attribute conflicts between Caller and Callee
2424 ///        that prevent inlining.
2425 static bool functionsHaveCompatibleAttributes(
2426     Function *Caller, Function *Callee, TargetTransformInfo &TTI,
2427     function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
2428   // Note that CalleeTLI must be a copy, not a reference. The legacy pass manager
2429   // caches the most recently created TLI in the TargetLibraryInfoWrapperPass
2430   // object, and always returns the same object (which is overwritten on each
2431   // GetTLI call). Therefore we copy the first result.
2432   auto CalleeTLI = GetTLI(*Callee);
2433   return TTI.areInlineCompatible(Caller, Callee) &&
2434          GetTLI(*Caller).areInlineCompatible(CalleeTLI,
2435                                              InlineCallerSupersetNoBuiltin) &&
2436          AttributeFuncs::areInlineCompatible(*Caller, *Callee);
2437 }
2438 
2439 int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
2440   int Cost = 0;
2441   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
2442     if (Call.isByValArgument(I)) {
2443       // We approximate the number of loads and stores needed by dividing the
2444       // size of the byval type by the target's pointer size.
2445       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2446       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
2447       unsigned AS = PTy->getAddressSpace();
2448       unsigned PointerSize = DL.getPointerSizeInBits(AS);
2449       // Ceiling division.
2450       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
2451 
2452       // If it generates more than 8 stores it is likely to be expanded as an
2453       // inline memcpy so we take that as an upper bound. Otherwise we assume
2454       // one load and one store per word copied.
2455       // FIXME: The maxStoresPerMemcpy setting from the target should be used
2456       // here instead of a magic number of 8, but it's not available via
2457       // DataLayout.
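           // Worked example: a 32-byte byval argument on a 64-bit target gives
           // NumStores = (256 + 63) / 64 = 4, so the argument contributes
           // 2 * 4 * InstrCost below.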
2458       NumStores = std::min(NumStores, 8U);
2459 
2460       Cost += 2 * NumStores * InlineConstants::InstrCost;
2461     } else {
2462       // For non-byval arguments subtract off one instruction per call
2463       // argument.
2464       Cost += InlineConstants::InstrCost;
2465     }
2466   }
2467   // The call instruction also disappears after inlining.
2468   Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
2469   return Cost;
2470 }
2471 
2472 InlineCost llvm::getInlineCost(
2473     CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
2474     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2475     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2476     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2477     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2478   return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
2479                        GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
2480 }
2481 
2482 Optional<int> llvm::getInliningCostEstimate(
2483     CallBase &Call, TargetTransformInfo &CalleeTTI,
2484     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2485     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2486     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2487   const InlineParams Params = {/* DefaultThreshold*/ 0,
2488                                /*HintThreshold*/ {},
2489                                /*ColdThreshold*/ {},
2490                                /*OptSizeThreshold*/ {},
2491                                /*OptMinSizeThreshold*/ {},
2492                                /*HotCallSiteThreshold*/ {},
2493                                /*LocallyHotCallSiteThreshold*/ {},
2494                                /*ColdCallSiteThreshold*/ {},
2495                                /*ComputeFullInlineCost*/ true,
2496                                /*EnableDeferral*/ true};
2497 
2498   InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
2499                             GetAssumptionCache, GetBFI, PSI, ORE, true,
2500                             /*IgnoreThreshold*/ true);
2501   auto R = CA.analyze();
2502   if (!R.isSuccess())
2503     return None;
2504   return CA.getCost();
2505 }
2506 
2507 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
2508     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
2509     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
2510 
2511   // Cannot inline indirect calls.
2512   if (!Callee)
2513     return InlineResult::failure("indirect call");
2514 
2515   // When a callee coroutine function is inlined into a caller coroutine
2516   // function before the coro-split pass, the coro-early pass cannot handle
2517   // this quite well. So we won't inline the coroutine function if it has not
2518   // yet been split.
2519   if (Callee->isPresplitCoroutine())
2520     return InlineResult::failure("unsplited coroutine call");
2521 
2522   // Never inline calls with byval arguments that do not have the alloca
2523   // address space. Since byval arguments can be replaced with a copy to an
2524   // alloca, the inlined code would need to be adjusted to handle the fact that
2525   // the argument is in the alloca address space (so it is a little bit
2526   // complicated to solve).
2527   unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
2528   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
2529     if (Call.isByValArgument(I)) {
2530       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2531       if (PTy->getAddressSpace() != AllocaAS)
2532         return InlineResult::failure("byval arguments without alloca"
2533                                      " address space");
2534     }
2535 
2536   // Calls to functions with always-inline attributes should be inlined
2537   // whenever possible.
2538   if (Call.hasFnAttr(Attribute::AlwaysInline)) {
2539     auto IsViable = isInlineViable(*Callee);
2540     if (IsViable.isSuccess())
2541       return InlineResult::success();
2542     return InlineResult::failure(IsViable.getFailureReason());
2543   }
2544 
2545   // Never inline functions with conflicting attributes (unless callee has
2546   // always-inline attribute).
2547   Function *Caller = Call.getCaller();
2548   if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
2549     return InlineResult::failure("conflicting attributes");
2550 
2551   // Don't inline this call if the caller has the optnone attribute.
2552   if (Caller->hasOptNone())
2553     return InlineResult::failure("optnone attribute");
2554 
2555   // Don't inline a function that treats null pointer as valid into a caller
2556   // that does not have this attribute.
2557   if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
2558     return InlineResult::failure("nullptr definitions incompatible");
2559 
2560   // Don't inline functions which can be interposed at link-time.
2561   if (Callee->isInterposable())
2562     return InlineResult::failure("interposable");
2563 
2564   // Don't inline functions marked noinline.
2565   if (Callee->hasFnAttribute(Attribute::NoInline))
2566     return InlineResult::failure("noinline function attribute");
2567 
2568   // Don't inline call sites marked noinline.
2569   if (Call.isNoInline())
2570     return InlineResult::failure("noinline call site attribute");
2571 
2572   // Don't inline functions if one does not have any stack protector attribute
2573   // but the other does.
2574   if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr())
2575     return InlineResult::failure(
2576         "stack protected caller but callee requested no stack protector");
2577   if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr())
2578     return InlineResult::failure(
2579         "stack protected callee but caller requested no stack protector");
2580 
2581   return None;
2582 }
2583 
2584 InlineCost llvm::getInlineCost(
2585     CallBase &Call, Function *Callee, const InlineParams &Params,
2586     TargetTransformInfo &CalleeTTI,
2587     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2588     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2589     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2590     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2591 
2592   auto UserDecision =
2593       llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);
2594 
2595   if (UserDecision.hasValue()) {
2596     if (UserDecision->isSuccess())
2597       return llvm::InlineCost::getAlways("always inline attribute");
2598     return llvm::InlineCost::getNever(UserDecision->getFailureReason());
2599   }
2600 
2601   LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
2602                           << "... (caller:" << Call.getCaller()->getName()
2603                           << ")\n");
2604 
2605   InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
2606                             GetAssumptionCache, GetBFI, PSI, ORE);
2607   InlineResult ShouldInline = CA.analyze();
2608 
2609   LLVM_DEBUG(CA.dump());
2610 
2611   // Check if there was a reason to force inlining or no inlining.
2612   if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
2613     return InlineCost::getNever(ShouldInline.getFailureReason());
2614   if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
2615     return InlineCost::getAlways("empty function");
2616 
2617   return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
2618 }
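
// Editorial sketch, not part of the original InlineCost.cpp: one way a client
// pass might act on the InlineCost returned above. The helper name
// shouldInlineCallSite is hypothetical; it relies only on the public
// InlineCost accessors (isAlways, isNever, getCost, getThreshold).
LLVM_ATTRIBUTE_UNUSED static bool shouldInlineCallSite(const InlineCost &IC) {
  if (IC.isAlways())
    return true;  // forced by getAlways(), e.g. the always-inline attribute
  if (IC.isNever())
    return false; // forced by getNever(), e.g. noinline or a viability veto
  // Variable cost: inline only while the modelled cost stays under the
  // threshold computed for this particular call site.
  return IC.getCost() < IC.getThreshold();
}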
2619 
2620 InlineResult llvm::isInlineViable(Function &F) {
2621   bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
2622   for (BasicBlock &BB : F) {
2623     // Disallow inlining of functions which contain indirect branches.
2624     if (isa<IndirectBrInst>(BB.getTerminator()))
2625       return InlineResult::failure("contains indirect branches");
2626 
2627     // Disallow inlining of functions whose blockaddresses are used by
2628     // non-callbr instructions.
2629     if (BB.hasAddressTaken())
2630       for (User *U : BlockAddress::get(&BB)->users())
2631         if (!isa<CallBrInst>(*U))
2632           return InlineResult::failure("blockaddress used outside of callbr");
2633 
2634     for (auto &II : BB) {
2635       CallBase *Call = dyn_cast<CallBase>(&II);
2636       if (!Call)
2637         continue;
2638 
2639       // Disallow recursive calls.
2640       Function *Callee = Call->getCalledFunction();
2641       if (&F == Callee)
2642         return InlineResult::failure("recursive call");
2643 
2644       // Disallow calls which expose returns-twice to a function not previously
2645       // attributed as such.
2646       if (!ReturnsTwice && isa<CallInst>(Call) &&
2647           cast<CallInst>(Call)->canReturnTwice())
2648         return InlineResult::failure("exposes returns-twice attribute");
2649 
2650       if (Callee)
2651         switch (Callee->getIntrinsicID()) {
2652         default:
2653           break;
2654         case llvm::Intrinsic::icall_branch_funnel:
2655           // Disallow inlining of @llvm.icall.branch.funnel because the
2656           // current backend can't separate call targets from call arguments.
2657           return InlineResult::failure(
2658               "disallowed inlining of @llvm.icall.branch.funnel");
2659         case llvm::Intrinsic::localescape:
2660           // Disallow inlining functions that call @llvm.localescape. Doing this
2661           // correctly would require major changes to the inliner.
2662           return InlineResult::failure(
2663               "disallowed inlining of @llvm.localescape");
2664         case llvm::Intrinsic::vastart:
2665           // Disallow inlining of functions that initialize VarArgs with
2666           // va_start.
2667           return InlineResult::failure(
2668               "contains VarArgs initialized with va_start");
2669         }
2670     }
2671   }
2672 
2673   return InlineResult::success();
2674 }
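
// Editorial sketch, not part of the original InlineCost.cpp: a hypothetical
// helper that walks a module and reports, per function, whether
// isInlineViable() would structurally allow inlining and, if not, which of
// the vetoes above fired. Assumes the Module definition is visible in this
// translation unit (it is used elsewhere in the file).
LLVM_ATTRIBUTE_UNUSED static void reportInlineViability(Module &M,
                                                        raw_ostream &OS) {
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    InlineResult Res = isInlineViable(F);
    OS << F.getName() << ": "
       << (Res.isSuccess() ? "viable" : Res.getFailureReason()) << "\n";
  }
}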
2675 
2676 // APIs to create InlineParams based on command line flags and/or other
2677 // parameters.
2678 
2679 InlineParams llvm::getInlineParams(int Threshold) {
2680   InlineParams Params;
2681 
2682   // This field is the threshold to use for a callee by default. This is
2683   // derived from one or more of:
2684   //  * optimization or size-optimization levels,
2685   //  * a value passed to createFunctionInliningPass function, or
2686   //  * the -inline-threshold flag.
2687   //  If the -inline-threshold flag is explicitly specified, that is used
2688   //  irrespective of anything else.
2689   if (InlineThreshold.getNumOccurrences() > 0)
2690     Params.DefaultThreshold = InlineThreshold;
2691   else
2692     Params.DefaultThreshold = Threshold;
2693 
2694   // Set the HintThreshold knob from the -inlinehint-threshold.
2695   Params.HintThreshold = HintThreshold;
2696 
2697   // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
2698   Params.HotCallSiteThreshold = HotCallSiteThreshold;
2699 
2700   // If the -locally-hot-callsite-threshold is explicitly specified, use it to
2701   // populate LocallyHotCallSiteThreshold. Later, we populate
2702   // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
2703   // we know that optimization level is O3 (in the getInlineParams variant that
2704   // takes the opt and size levels).
2705   // FIXME: Remove this check (and make the assignment unconditional) after
2706   // addressing size regression issues at O2.
2707   if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
2708     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2709 
2710   // Set the ColdCallSiteThreshold knob from the
2711   // -inline-cold-callsite-threshold.
2712   Params.ColdCallSiteThreshold = ColdCallSiteThreshold;
2713 
2714   // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
2715   // -inline-threshold command-line option is not explicitly given. If that
2716   // option is present, then its value applies even for callees with size and
2717   // minsize attributes.
2718   // If the -inline-threshold is not specified, set the ColdThreshold from the
2719   // -inlinecold-threshold even if it is not explicitly passed. If
2720   // -inline-threshold is specified, then -inlinecold-threshold needs to be
2721   // explicitly specified to set the ColdThreshold knob.
2722   if (InlineThreshold.getNumOccurrences() == 0) {
2723     Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
2724     Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
2725     Params.ColdThreshold = ColdThreshold;
2726   } else if (ColdThreshold.getNumOccurrences() > 0) {
2727     Params.ColdThreshold = ColdThreshold;
2728   }
2729   return Params;
2730 }
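
// Editorial sketch, not part of the original InlineCost.cpp: a hypothetical
// sanity check spelling out the precedence implemented above. When
// -inline-threshold is absent, the caller-supplied value wins and the
// size/cold knobs are populated; when the flag is present, it overrides the
// caller's value and the size knobs are deliberately left unset.
LLVM_ATTRIBUTE_UNUSED static void checkInlineParamPrecedence(int Requested) {
  InlineParams P = getInlineParams(Requested);
  if (InlineThreshold.getNumOccurrences() == 0) {
    assert(P.DefaultThreshold == Requested && "caller-supplied value wins");
    assert(P.OptSizeThreshold && P.OptMinSizeThreshold &&
           "size thresholds populated from InlineConstants");
  } else {
    assert(P.DefaultThreshold == InlineThreshold && "flag overrides caller");
    assert(!P.OptSizeThreshold && !P.OptMinSizeThreshold &&
           "size thresholds intentionally left unset");
  }
  (void)P;
}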
2731 
2732 InlineParams llvm::getInlineParams() {
2733   return getInlineParams(DefaultThreshold);
2734 }
2735 
2736 // Compute the default threshold for inlining based on the opt level and the
2737 // size opt level.
2738 static int computeThresholdFromOptLevels(unsigned OptLevel,
2739                                          unsigned SizeOptLevel) {
2740   if (OptLevel > 2)
2741     return InlineConstants::OptAggressiveThreshold;
2742   if (SizeOptLevel == 1) // -Os
2743     return InlineConstants::OptSizeThreshold;
2744   if (SizeOptLevel == 2) // -Oz
2745     return InlineConstants::OptMinSizeThreshold;
2746   return DefaultThreshold;
2747 }
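
// Editorial sketch, not part of the original InlineCost.cpp: the mapping
// implemented above written out as a hypothetical unit-test style check. The
// InlineConstants names are the same symbols the function returns; their
// numeric values are defined in llvm/Analysis/InlineCost.h.
LLVM_ATTRIBUTE_UNUSED static void checkThresholdMapping() {
  // -O3 and above use the aggressive threshold, regardless of the size level.
  assert(computeThresholdFromOptLevels(3, 0) ==
         InlineConstants::OptAggressiveThreshold);
  // -Os and -Oz shrink the threshold to favor code size.
  assert(computeThresholdFromOptLevels(2, 1) ==
         InlineConstants::OptSizeThreshold);
  assert(computeThresholdFromOptLevels(2, 2) ==
         InlineConstants::OptMinSizeThreshold);
  // Everything else falls back to the -inlinedefault-threshold value.
  assert(computeThresholdFromOptLevels(2, 0) == DefaultThreshold);
}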
2748 
2749 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
2750   auto Params =
2751       getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
2752   // At O3, use the value of the -locally-hot-callsite-threshold option to
2753   // populate Params.LocallyHotCallSiteThreshold. Below O3, this flag takes
2754   // effect only when it is specified explicitly.
2755   if (OptLevel > 2)
2756     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2757   return Params;
2758 }
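
// Editorial sketch, not part of the original InlineCost.cpp: a hypothetical
// check of the O3-only behavior noted above. Below O3 the locally-hot knob
// is populated only when -locally-hot-callsite-threshold was given
// explicitly; at O3 and above it is always populated.
LLVM_ATTRIBUTE_UNUSED static void checkLocallyHotKnob(unsigned OptLevel) {
  InlineParams P = getInlineParams(OptLevel, /*SizeOptLevel=*/0);
  if (OptLevel > 2)
    assert(P.LocallyHotCallSiteThreshold.hasValue());
  else if (LocallyHotCallSiteThreshold.getNumOccurrences() == 0)
    assert(!P.LocallyHotCallSiteThreshold.hasValue());
  (void)P;
}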
2759 
2760 PreservedAnalyses
2761 InlineCostAnnotationPrinterPass::run(Function &F,
2762                                      FunctionAnalysisManager &FAM) {
2763   PrintInstructionComments = true;
2764   std::function<AssumptionCache &(Function &)> GetAssumptionCache =
2765       [&](Function &F) -> AssumptionCache & {
2766     return FAM.getResult<AssumptionAnalysis>(F);
2767   };
2768   Module *M = F.getParent();
2769   ProfileSummaryInfo PSI(*M);
2770   DataLayout DL(M);
2771   TargetTransformInfo TTI(DL);
2772   // FIXME: Redesign the usage of InlineParams to expand the scope of this
2773   // pass. In the current implementation, the concrete InlineParams values do
2774   // not matter, as the pass serves only to verify the inliner's decisions.
2775   // We could add a flag that selects the InlineParams for this run; right
2776   // now, the default InlineParams are used.
2777   const InlineParams Params = llvm::getInlineParams();
2778   for (BasicBlock &BB : F) {
2779     for (Instruction &I : BB) {
2780       if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2781         Function *CalledFunction = CI->getCalledFunction();
2782         if (!CalledFunction || CalledFunction->isDeclaration())
2783           continue;
2784         OptimizationRemarkEmitter ORE(CalledFunction);
2785         InlineCostCallAnalyzer ICCA(*CalledFunction, *CI, Params, TTI,
2786                                     GetAssumptionCache, nullptr, &PSI, &ORE);
2787         ICCA.analyze();
2788         OS << "      Analyzing call of " << CalledFunction->getName()
2789            << "... (caller:" << CI->getCaller()->getName() << ")\n";
2790         ICCA.print();
2791       }
2792     }
2793   }
2794   return PreservedAnalyses::all();
2795 }
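
// Editorial note, not part of the original InlineCost.cpp: this printer pass
// is meant to be driven from the new pass manager, e.g.
//
//   opt -passes='print<inline-cost>' -disable-output input.ll
//
// assuming that is the name registered for InlineCostAnnotationPrinterPass in
// llvm/lib/Passes/PassRegistry.def. Because the pass flips
// PrintInstructionComments itself, the emitted dump annotates each analyzed
// callee instruction with the cost bookkeeping recorded for it.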
2796