//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components;
// it rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
//       of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <map>
#include <numeric>
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "loop-reduce"

/// MaxIVUsers is an arbitrary threshold that provides an early opportunity to
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
/// worst cases before LSR burns too much compile time and stack space.
static const unsigned MaxIVUsers = 200;

/// Limit the size of expression that SCEV-based salvaging will attempt to
/// translate into a DIExpression.
/// Choose a maximum size such that debuginfo is not excessively increased and
/// the salvaging is not too expensive for the compiler.
static const unsigned MaxSCEVSalvageExpressionSize = 64;

// Cleanup congruent phis after LSR phi expansion.
static cl::opt<bool> EnablePhiElim(
  "enable-lsr-phielim", cl::Hidden, cl::init(true),
  cl::desc("Enable LSR phi elimination"));

// This flag adds instruction count to the solution cost comparison.
static cl::opt<bool> InsnsCost(
  "lsr-insns-cost", cl::Hidden, cl::init(true),
  cl::desc("Add instruction count to a LSR cost model"));

// Flag to choose how to narrow a complex LSR solution.
static cl::opt<bool> LSRExpNarrow(
  "lsr-exp-narrow", cl::Hidden, cl::init(false),
  cl::desc("Narrow LSR complex solution using"
           " expectation of registers number"));

// Flag to narrow search space by filtering non-optimal formulae with
// the same ScaledReg and Scale.
static cl::opt<bool> FilterSameScaledReg(
    "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Narrow LSR search space by filtering non-optimal formulae"
             " with the same ScaledReg and Scale"));

static cl::opt<TTI::AddressingModeKind> PreferredAddresingMode(
  "lsr-preferred-addressing-mode", cl::Hidden, cl::init(TTI::AMK_None),
   cl::desc("A flag that overrides the target's preferred addressing mode."),
   cl::values(clEnumValN(TTI::AMK_None,
                         "none",
                         "Don't prefer any addressing mode"),
              clEnumValN(TTI::AMK_PreIndexed,
                         "preindexed",
                         "Prefer pre-indexed addressing mode"),
              clEnumValN(TTI::AMK_PostIndexed,
                         "postindexed",
                         "Prefer post-indexed addressing mode")));

static cl::opt<unsigned> ComplexityLimit(
  "lsr-complexity-limit", cl::Hidden,
  cl::init(std::numeric_limits<uint16_t>::max()),
  cl::desc("LSR search space complexity limit"));

static cl::opt<unsigned> SetupCostDepthLimit(
    "lsr-setupcost-depth-limit", cl::Hidden, cl::init(7),
    cl::desc("The limit on recursion depth for LSRs setup cost"));

static cl::opt<bool> AllowTerminatingConditionFoldingAfterLSR(
    "lsr-term-fold", cl::Hidden, cl::init(false),
    cl::desc("Attempt to replace primary IV with other IV."));

static cl::opt<bool> AllowDropSolutionIfLessProfitable(
    "lsr-drop-solution", cl::Hidden, cl::init(false),
    cl::desc("Attempt to drop solution if it is less profitable"));

STATISTIC(NumTermFold,
          "Number of terminating condition folds recognized and performed");

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
  "stress-ivchain", cl::Hidden, cl::init(false),
  cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif

namespace {

struct MemAccessTy {
  /// Used in situations where the accessed memory type is unknown.
  static const unsigned UnknownAddressSpace =
      std::numeric_limits<unsigned>::max();

  Type *MemTy = nullptr;
  unsigned AddrSpace = UnknownAddressSpace;

  MemAccessTy() = default;
  MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}

  bool operator==(MemAccessTy Other) const {
    return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
  }

  bool operator!=(MemAccessTy Other) const { return !(*this == Other); }

  static MemAccessTy getUnknown(LLVMContext &Ctx,
                                unsigned AS = UnknownAddressSpace) {
    return MemAccessTy(Type::getVoidTy(Ctx), AS);
  }

  Type *getType() { return MemTy; }
};

/// This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

LLVM_DUMP_METHOD void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

/// Map register candidates to information about how they are used.
class RegUseTracker {
  using RegUsesTy = DenseMap<const SCEV *, RegSortData>;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void countRegister(const SCEV *Reg, size_t LUIdx);
  void dropRegister(const SCEV *Reg, size_t LUIdx);
  void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  using iterator = SmallVectorImpl<const SCEV *>::iterator;
  using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator;

  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

} // end anonymous namespace

void
RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
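  // For example (illustrative): with LUIdx = 1 and LastLUIdx = 3, each
  // register's bit for use 3 is copied into slot 1, and the bit vectors are
  // then truncated so the stale last slot disappears.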
  for (auto &Pair : RegUsesMap) {
    SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {

/// This class holds information that describes a formula for computing a
/// value that satisfies a use. It may include broken-out immediates and
/// scaled registers.
struct Formula {
  /// Global base address used for complex addressing.
  GlobalValue *BaseGV = nullptr;

  /// Base offset for complex addressing.
  int64_t BaseOffset = 0;

  /// Whether any complex addressing has a base register.
  bool HasBaseReg = false;

  /// The scale of any complex addressing.
  int64_t Scale = 0;

  /// The list of "base" registers for this use. When this is non-empty, the
  /// canonical representation of a formula is
  /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
  /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
  /// 3. The reg containing the recurrent expr related with the current loop
  /// in the formula should be put in the ScaledReg.
  /// #1 enforces that the scaled register is always used when at least two
  /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
  /// #2 enforces that 1 * reg is reg.
  /// #3 ensures invariant regs with respect to current loop can be combined
  /// together in LSR codegen.
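  /// For example (illustrative): the sum reg1 + reg2 is held canonically as
  /// BaseRegs = {reg1}, ScaledReg = reg2, Scale = 1, while a lone reg is held
  /// as BaseRegs = {reg}, ScaledReg = nullptr, Scale = 0.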
  /// This invariant can be temporarily broken while building a formula.
  /// However, every formula inserted into the LSRInstance must be in canonical
  /// form.
  SmallVector<const SCEV *, 4> BaseRegs;

  /// The 'scaled' register for this use. This should be non-null when Scale is
  /// not zero.
  const SCEV *ScaledReg = nullptr;

  /// An additional constant offset which is added near the use. This requires a
  /// temporary register, but the offset itself can live in an add immediate
  /// field rather than a register.
  int64_t UnfoldedOffset = 0;

  Formula() = default;

  void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  bool isCanonical(const Loop &L) const;

  void canonicalize(const Loop &L);

  bool unscale();

  bool hasZeroEnd() const;

  size_t getNumRegs() const;
  Type *getType() const;

  void deleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

/// Recursion helper for initialMatch.
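/// For instance (illustrative): matching {%inv,+,1}<%L>, where %inv is
/// loop-invariant, sends %inv to Good (it properly dominates the loop header)
/// and the remaining addrec {0,+,1}<%L> to Bad.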
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands())
      DoInitialMatch(S, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero() && AR->isAffine()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(drop_begin(Mul->operands()));
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (const SCEV *S : MyGood)
        Good.push_back(SE.getMulExpr(NegOne, S));
      for (const SCEV *S : MyBad)
        Bad.push_back(SE.getMulExpr(NegOne, S));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// Incorporate loop-variant parts of S into this Formula, attempting to keep
/// all loop-invariant and loop-computable values in a single base register.
void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  canonicalize(*L);
}

static bool containsAddRecDependentOnLoop(const SCEV *S, const Loop &L) {
  return SCEVExprContains(S, [&L](const SCEV *S) {
    return isa<SCEVAddRecExpr>(S) && (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
  });
}

/// Check whether or not this formula satisfies the canonical
/// representation.
/// \see Formula::BaseRegs.
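/// For example (illustrative): BaseRegs = {reg1} with no ScaledReg is
/// canonical; BaseRegs = {reg1, reg2} with no ScaledReg is not; nor is a set
/// ScaledReg with Scale == 1 and empty BaseRegs (that should be plain reg).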
bool Formula::isCanonical(const Loop &L) const {
  if (!ScaledReg)
    return BaseRegs.size() <= 1;

  if (Scale != 1)
    return true;

  if (Scale == 1 && BaseRegs.empty())
    return false;

  if (containsAddRecDependentOnLoop(ScaledReg, L))
    return true;

  // If ScaledReg is not a recurrent expr, or it is one whose loop is not the
  // current loop, while BaseRegs contains a recurrent expr reg related with
  // the current loop, we want to swap that reg in BaseRegs with ScaledReg.
  return none_of(BaseRegs, [&L](const SCEV *S) {
    return containsAddRecDependentOnLoop(S, L);
  });
}

/// Helper method to morph a formula into its canonical representation.
/// \see Formula::BaseRegs.
/// Every formula having more than one base register must use the ScaledReg
/// field. Otherwise, we would have to do special cases everywhere in LSR
/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
/// On the other hand, 1*reg should be canonicalized into reg.
void Formula::canonicalize(const Loop &L) {
  if (isCanonical(L))
    return;

  if (BaseRegs.empty()) {
    // No base reg? Use scale reg with scale = 1 as such.
    assert(ScaledReg && "Expected 1*reg => reg");
    assert(Scale == 1 && "Expected 1*reg => reg");
    BaseRegs.push_back(ScaledReg);
    Scale = 0;
    ScaledReg = nullptr;
    return;
  }

  // Keep the invariant sum in BaseRegs and one of the variant sums in
  // ScaledReg.
  if (!ScaledReg) {
    ScaledReg = BaseRegs.pop_back_val();
    Scale = 1;
  }

  // If ScaledReg is an invariant with respect to L, find the reg from
  // BaseRegs containing the recurrent expr related with Loop L. Swap the
  // reg with ScaledReg.
  if (!containsAddRecDependentOnLoop(ScaledReg, L)) {
    auto I = find_if(BaseRegs, [&L](const SCEV *S) {
      return containsAddRecDependentOnLoop(S, L);
    });
    if (I != BaseRegs.end())
      std::swap(ScaledReg, *I);
  }
  assert(isCanonical(L) && "Failed to canonicalize?");
}

/// Get rid of the scale in the formula.
/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
/// \return true if it was possible to get rid of the scale, false otherwise.
/// \note After this operation the formula may not be in the canonical form.
bool Formula::unscale() {
  if (Scale != 1)
    return false;
  Scale = 0;
  BaseRegs.push_back(ScaledReg);
  ScaledReg = nullptr;
  return true;
}

bool Formula::hasZeroEnd() const {
  if (UnfoldedOffset || BaseOffset)
    return false;
  if (BaseRegs.size() != 1 || ScaledReg)
    return false;
  return true;
}

/// Return the total number of register operands used by this formula. This does
/// not include register uses implied by non-constant addrec strides.
size_t Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// Return the type of this formula, if it has one, or null otherwise. This type
/// is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         BaseGV ? BaseGV->getType() :
         nullptr;
}

/// Delete the given base reg from the BaseRegs list.
void Formula::deleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg || is_contained(BaseRegs, S);
}

/// Test whether this formula uses registers which are used by uses other than
/// the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (const SCEV *BaseReg : BaseRegs)
    if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
      return true;
  return false;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (BaseGV) {
    if (!First) OS << " + "; else First = false;
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
  }
  if (BaseOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << BaseOffset;
  }
  for (const SCEV *BaseReg : BaseRegs) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << *BaseReg << ')';
  }
  if (HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + ";
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

LLVM_DUMP_METHOD void Formula::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// Return true if the given addrec can be sign-extended without changing its
/// value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// Return true if the given add can be sign-extended without changing its
/// value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// Return true if the given mul can be sign-extended without changing its
/// value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// Return an expression for LHS /s RHS, if it can be determined and if the
/// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits
/// is true, expressions like (X * Y) /s Y are simplified to X, ignoring that
/// the multiplication may overflow, which is useful when the result will be
/// used in a context where the most significant bits are ignored.
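/// For instance (illustrative): {8,+,4}<%L> /s 2 yields {4,+,2}<%L> when the
/// addrec is known not to overflow after sign extension.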
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getAPInt();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnes()) {
      if (LHS->getType()->isPointerTy())
        return nullptr;
      return SE.getMulExpr(LHS, RC);
    }
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return nullptr;
    const APInt &LA = C->getAPInt();
    const APInt &RA = RC->getAPInt();
    if (LA.srem(RA) != 0)
      return nullptr;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return nullptr;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return nullptr;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return nullptr;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (const SCEV *S : Add->operands()) {
        const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
        if (!Op) return nullptr;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return nullptr;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      // Handle special case C1*X*Y /s C2*X*Y.
      if (const SCEVMulExpr *MulRHS = dyn_cast<SCEVMulExpr>(RHS)) {
        if (IgnoreSignificantBits || isMulSExtable(MulRHS, SE)) {
          const SCEVConstant *LC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
          const SCEVConstant *RC =
              dyn_cast<SCEVConstant>(MulRHS->getOperand(0));
          if (LC && RC) {
            SmallVector<const SCEV *, 4> LOps(drop_begin(Mul->operands()));
            SmallVector<const SCEV *, 4> ROps(drop_begin(MulRHS->operands()));
            if (LOps == ROps)
              return getExactSDiv(LC, RC, SE, IgnoreSignificantBits);
          }
        }
      }

      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (const SCEV *S : Mul->operands()) {
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : nullptr;
    }
    return nullptr;
  }

  // Otherwise we don't know.
  return nullptr;
}

/// If S involves the addition of a constant integer value, return that integer
/// value, and mutate S to point to a new SCEV with that value excluded.
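/// For example (illustrative): given S = (4 + %x) this returns 4 and leaves S
/// pointing at %x; given S = {4,+,1}<%L> it returns 4 and leaves S pointing
/// at {0,+,1}<%L>.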
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getAPInt().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->operands());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->operands());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// If S involves the addition of a GlobalValue address, return that symbol, and
/// mutate S to point to a new SCEV with that value excluded.
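/// For example (illustrative): when the symbol is the last add operand, given
/// S = (%x + @g) this returns @g and leaves S pointing at the remaining sum.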
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->operands());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->operands());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return nullptr;
}

/// Returns true if the specified instruction is using the specified value as an
/// address.
static bool isAddressUse(const TargetTransformInfo &TTI,
                         Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::prefetch:
    case Intrinsic::masked_load:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    case Intrinsic::masked_store:
      if (II->getArgOperand(1) == OperandVal)
        isAddress = true;
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (II->getArgOperand(0) == OperandVal ||
          II->getArgOperand(1) == OperandVal)
        isAddress = true;
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo)) {
        if (IntrInfo.PtrVal == OperandVal)
          isAddress = true;
      }
    }
    }
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    if (RMW->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    if (CmpX->getPointerOperand() == OperandVal)
      isAddress = true;
  }
  return isAddress;
}

/// Return the type of the memory being accessed.
static MemAccessTy getAccessType(const TargetTransformInfo &TTI,
                                 Instruction *Inst, Value *OperandVal) {
  MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace);
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    AccessTy.MemTy = SI->getOperand(0)->getType();
    AccessTy.AddrSpace = SI->getPointerAddressSpace();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    AccessTy.AddrSpace = LI->getPointerAddressSpace();
  } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    AccessTy.AddrSpace = RMW->getPointerAddressSpace();
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    AccessTy.AddrSpace = CmpX->getPointerAddressSpace();
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::prefetch:
    case Intrinsic::memset:
      AccessTy.AddrSpace = II->getArgOperand(0)->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    case Intrinsic::masked_load:
      AccessTy.AddrSpace =
          II->getArgOperand(0)->getType()->getPointerAddressSpace();
      break;
    case Intrinsic::masked_store:
      AccessTy.MemTy = II->getOperand(0)->getType();
      AccessTy.AddrSpace =
          II->getArgOperand(1)->getType()->getPointerAddressSpace();
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) {
        AccessTy.AddrSpace
          = IntrInfo.PtrVal->getType()->getPointerAddressSpace();
      }

      break;
    }
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy.MemTy))
    AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                      PTy->getAddressSpace());

  return AccessTy;
}

/// Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (PHINode &PN : AR->getLoop()->getHeader()->phis()) {
    if (SE.isSCEVable(PN.getType()) &&
        (SE.getEffectiveSCEVType(PN.getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(&PN) == AR)
      return true;
  }
  return false;
}

/// Check if expanding this expression is likely to incur significant cost. This
/// is tricky because SCEV doesn't track which expressions are actually computed
/// by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSetImpl<const SCEV*> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  default:
    break;
  }

  if (!Processed.insert(S).second)
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands()) {
      if (isHighCostExpansion(S, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (User *UR : UVal->users()) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *UI = dyn_cast<Instruction>(UR);
          if (UI && UI->getOpcode() == Instruction::Mul &&
              SE.isSCEVable(UI->getType())) {
            return SE.getSCEV(UI) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high cost.
  return true;
}

namespace {

class LSRUse;

} // end anonymous namespace

/// Check if the addressing mode defined by \p F is completely
/// folded in \p LU at isel time.
/// This includes address-mode folding and special icmp tricks.
/// This function returns true if \p LU can accommodate what \p F
/// defines and up to 1 base + 1 scaled + offset.
/// In other words, if \p F has several base registers, this function may
/// still return true. Therefore, users still need to account for
/// additional base registers and/or unfolded offsets to derive an
/// accurate cost model.
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F);

// Get the cost of the scaling factor used in F for LU.
static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI,
                                            const LSRUse &LU, const Formula &F,
                                            const Loop &L);

namespace {

/// This class is used to measure and compare candidate formulae.
class Cost {
  const Loop *L = nullptr;
  ScalarEvolution *SE = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  TargetTransformInfo::LSRCost C;
  TTI::AddressingModeKind AMK = TTI::AMK_None;

public:
  Cost() = delete;
  Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
       TTI::AddressingModeKind AMK) :
    L(L), SE(&SE), TTI(&TTI), AMK(AMK) {
    C.Insns = 0;
    C.NumRegs = 0;
    C.AddRecCost = 0;
    C.NumIVMuls = 0;
    C.NumBaseAdds = 0;
    C.ImmCost = 0;
    C.SetupCost = 0;
    C.ScaleCost = 0;
  }

  bool isLess(const Cost &Other) const;

  void Lose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
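  // Roughly: the state is valid if no counter has saturated to ~0u (then
  // their bitwise OR is not all-ones) or if every counter has (then their
  // bitwise AND is all-ones); a mixed state indicates a bookkeeping bug.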
  bool isValid() {
    return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds
             | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u)
      || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds
           & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return C.NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSetImpl<const SCEV *> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const LSRUse &LU,
                   SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const Formula &F, const SCEV *Reg,
                    SmallPtrSetImpl<const SCEV *> &Regs);
  void RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                           SmallPtrSetImpl<const SCEV *> &Regs,
                           SmallPtrSetImpl<const SCEV *> *LoserRegs);
};

/// An operand value in an instruction which is to be replaced with some
/// equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// The instruction which will be updated.
  Instruction *UserInst = nullptr;

  /// The operand of the instruction which will be replaced. The operand may be
  /// used more than once; every instance will be replaced.
  Value *OperandValToReplace = nullptr;

  /// If this user is to use the post-incremented value of an induction
  /// variable, this set is non-empty and holds the loops associated with the
  /// induction variable.
  PostIncLoopSet PostIncLoops;

  /// A constant offset to be added to the LSRUse expression.  This allows
  /// multiple fixups to share the same LSRUse with different offsets, for
  /// example in an unrolled loop.
  int64_t Offset = 0;

  LSRFixup() = default;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

/// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted
/// SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 4> getEmptyKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 4> getTombstoneKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
    return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
  }

  static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
                      const SmallVector<const SCEV *, 4> &RHS) {
    return LHS == RHS;
  }
};

/// This class holds the state that LSR keeps for each use in IVUsers, as well
/// as uses invented by LSR itself. It includes information about what kinds of
/// things can be folded into the user, information about the user itself, and
/// information about how the use may be satisfied.  TODO: Represent multiple
/// users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// An enum for a kind of use, indicating what types of scaled and immediate
  /// operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>;

  KindType Kind;
  MemAccessTy AccessTy;

  /// The list of operands which are to be replaced.
  SmallVector<LSRFixup, 8> Fixups;

  /// Keep track of the min and max offsets of the fixups.
  int64_t MinOffset = std::numeric_limits<int64_t>::max();
  int64_t MaxOffset = std::numeric_limits<int64_t>::min();

  /// This records whether all of the fixups using this LSRUse are outside of
  /// the loop, in which case some special-case heuristics may be used.
  bool AllFixupsOutsideLoop = true;

  /// RigidFormula is set to true to guarantee that this use will be associated
  /// with a single formula--the one that initially matched. Some SCEV
  /// expressions cannot be expanded. This allows LSR to consider the registers
  /// used by those expressions without the need to expand them later after
  /// changing the formula.
  bool RigidFormula = false;

  /// This records the widest use type for any fixup using this
  /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max
  /// fixup widths to be equivalent, because the narrower one may be relying on
  /// the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType = nullptr;

  /// A list of ways to build a value that can satisfy this user.  After the
  /// list is populated, one of these is selected heuristically and used to
  /// formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  void pushFixup(LSRFixup &f) {
    Fixups.push_back(f);
    if (f.Offset > MaxOffset)
      MaxOffset = f.Offset;
    if (f.Offset < MinOffset)
      MinOffset = f.Offset;
  }

  bool HasFormulaWithSameRegs(const Formula &F) const;
  float getNotSelectedProbability(const SCEV *Reg) const;
  bool InsertFormula(const Formula &F, const Loop &L);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup = nullptr);

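/// Rough heuristic: a relative weight for how much preheader setup code
/// materializing \p Reg would take, summed recursively over the operands of
/// compound SCEVs down to the given Depth limit.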
static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
  if (isa<SCEVUnknown>(Reg) || isa<SCEVConstant>(Reg))
    return 1;
  if (Depth == 0)
    return 0;
  if (const auto *S = dyn_cast<SCEVAddRecExpr>(Reg))
    return getSetupCost(S->getStart(), Depth - 1);
  if (auto S = dyn_cast<SCEVIntegralCastExpr>(Reg))
    return getSetupCost(S->getOperand(), Depth - 1);
  if (auto S = dyn_cast<SCEVNAryExpr>(Reg))
    return std::accumulate(S->operands().begin(), S->operands().end(), 0,
                           [&](unsigned i, const SCEV *Reg) {
                             return i + getSetupCost(Reg, Depth - 1);
                           });
  if (auto S = dyn_cast<SCEVUDivExpr>(Reg))
    return getSetupCost(S->getLHS(), Depth - 1) +
           getSetupCost(S->getRHS(), Depth - 1);
  return 0;
}

/// Tally up interesting quantities from the given register.
void Cost::RateRegister(const Formula &F, const SCEV *Reg,
                        SmallPtrSetImpl<const SCEV *> &Regs) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, it should be an invariant
    // with respect to L since L is the innermost loop (at least
    // for now LSR only handles innermost loops).
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, *SE) && AMK != TTI::AMK_PostIndexed)
        return;

      // It is bad to allow LSR for current loop to add induction variables
      // for its sibling loops.
      if (!AR->getLoop()->contains(L)) {
        Lose();
        return;
      }

      // Otherwise, it will be an invariant with respect to Loop L.
      ++C.NumRegs;
      return;
    }

    unsigned LoopCost = 1;
    if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
        TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {

      // If the step size matches the base offset, we could use pre-indexed
      // addressing.
      if (AMK == TTI::AMK_PreIndexed) {
        if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
          if (Step->getAPInt() == F.BaseOffset)
            LoopCost = 0;
      } else if (AMK == TTI::AMK_PostIndexed) {
        const SCEV *LoopStep = AR->getStepRecurrence(*SE);
        if (isa<SCEVConstant>(LoopStep)) {
          const SCEV *LoopStart = AR->getStart();
          if (!isa<SCEVConstant>(LoopStart) &&
              SE->isLoopInvariant(LoopStart, L))
            LoopCost = 0;
        }
      }
    }
    C.AddRecCost += LoopCost;

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(F, AR->getOperand(1), Regs);
        if (isLoser())
          return;
      }
    }
  }
  ++C.NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  C.SetupCost += getSetupCost(Reg, SetupCostDepthLimit);
  // Ensure we don't, even with the recursion limit, produce invalid costs.
  C.SetupCost = std::min<unsigned>(C.SetupCost, 1 << 16);

  C.NumIVMuls += isa<SCEVMulExpr>(Reg) &&
               SE->hasComputableLoopEvolution(Reg, L);
}

/// Record this register in the set. If we haven't seen it before, rate
/// it. Optional LoserRegs provides a way to declare any formula that refers to
/// one of those regs an instant loser.
void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                               SmallPtrSetImpl<const SCEV *> &Regs,
                               SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Lose();
    return;
  }
  if (Regs.insert(Reg).second) {
    RateRegister(F, Reg, Regs);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSetImpl<const SCEV *> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const LSRUse &LU,
                       SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (isLoser())
    return;
  assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
  // Tally up the registers.
  unsigned PrevAddRecCost = C.AddRecCost;
  unsigned PrevNumRegs = C.NumRegs;
  unsigned PrevNumBaseAdds = C.NumBaseAdds;
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, ScaledReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }
  for (const SCEV *BaseReg : F.BaseRegs) {
    if (VisitedRegs.count(BaseReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, BaseReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.getNumRegs();
  if (NumBaseParts > 1)
    // Do not count the base and a possible second register if the target
    // can fold 2 registers.
    C.NumBaseAdds +=
        NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(*TTI, LU, F)));
  C.NumBaseAdds += (F.UnfoldedOffset != 0);

  // Accumulate non-free scaling amounts.
  C.ScaleCost += *getScalingFactorCost(*TTI, LU, F, *L).getValue();

  // Tally up the non-zero immediates.
  for (const LSRFixup &Fixup : LU.Fixups) {
    int64_t O = Fixup.Offset;
    int64_t Offset = (uint64_t)O + F.BaseOffset;
    if (F.BaseGV)
      C.ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      C.ImmCost += APInt(64, Offset, true).getMinSignedBits();

    // Check with target if this offset with this instruction is
    // specifically not supported.
    if (LU.Kind == LSRUse::Address && Offset != 0 &&
        !isAMCompletelyFolded(*TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
                              Offset, F.HasBaseReg, F.Scale, Fixup.UserInst))
      C.NumBaseAdds++;
  }

1427   // If we don't count instruction cost, exit here.
1428   if (!InsnsCost) {
1429     assert(isValid() && "invalid cost");
1430     return;
1431   }
1432 
1433   // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as
1434   // an additional instruction (at least a fill).
1435   // TODO: Need to distinguish register classes?
1436   unsigned TTIRegNum = TTI->getNumberOfRegisters(
1437                        TTI->getRegisterClassForType(false, F.getType())) - 1;
1438   if (C.NumRegs > TTIRegNum) {
1439     // If the cost already exceeded TTIRegNum, then only newly added
1440     // registers can add new instructions.
1441     if (PrevNumRegs > TTIRegNum)
1442       C.Insns += (C.NumRegs - PrevNumRegs);
1443     else
1444       C.Insns += (C.NumRegs - TTIRegNum);
1445   }
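       // Illustrative (numbers hypothetical): if the target reports 8 registers
       // for this class, TTIRegNum is 7. Growing from 6 to 9 registers counts
       // 9 - 7 = 2 extra instructions, while growing from 8 to 9 counts just 1,
       // because the cost beyond TTIRegNum was already paid.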
1446 
1447   // If an ICmpZero formula doesn't end with zero, it cannot be replaced by
1448   // just an add or sub. We'll need to compare the final result of the AddRec.
1449   // That means we'll need an additional instruction. But if the target can
1450   // macro-fuse a compare with a branch, don't count this extra instruction.
1451   // For -10 + {0, +, 1}:
1452   // i = i + 1;
1453   // cmp i, 10
1454   //
1455   // For {-10, +, 1}:
1456   // i = i + 1;
1457   if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() &&
1458       !TTI->canMacroFuseCmp())
1459     C.Insns++;
1460   // Each new AddRec adds 1 instruction to the calculation.
1461   C.Insns += (C.AddRecCost - PrevAddRecCost);
1462 
1463   // BaseAdds adds instructions for unfolded registers.
1464   if (LU.Kind != LSRUse::ICmpZero)
1465     C.Insns += C.NumBaseAdds - PrevNumBaseAdds;
1466   assert(isValid() && "invalid cost");
1467 }
1468 
1469 /// Set this cost to a losing value.
1470 void Cost::Lose() {
1471   C.Insns = std::numeric_limits<unsigned>::max();
1472   C.NumRegs = std::numeric_limits<unsigned>::max();
1473   C.AddRecCost = std::numeric_limits<unsigned>::max();
1474   C.NumIVMuls = std::numeric_limits<unsigned>::max();
1475   C.NumBaseAdds = std::numeric_limits<unsigned>::max();
1476   C.ImmCost = std::numeric_limits<unsigned>::max();
1477   C.SetupCost = std::numeric_limits<unsigned>::max();
1478   C.ScaleCost = std::numeric_limits<unsigned>::max();
1479 }
1480 
1481 /// Choose the lower cost.
1482 bool Cost::isLess(const Cost &Other) const {
1483   if (InsnsCost.getNumOccurrences() > 0 && InsnsCost &&
1484       C.Insns != Other.C.Insns)
1485     return C.Insns < Other.C.Insns;
1486   return TTI->isLSRCostLess(C, Other.C);
1487 }
1488 
1489 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1490 void Cost::print(raw_ostream &OS) const {
1491   if (InsnsCost)
1492     OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s ");
1493   OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s");
1494   if (C.AddRecCost != 0)
1495     OS << ", with addrec cost " << C.AddRecCost;
1496   if (C.NumIVMuls != 0)
1497     OS << ", plus " << C.NumIVMuls << " IV mul"
1498        << (C.NumIVMuls == 1 ? "" : "s");
1499   if (C.NumBaseAdds != 0)
1500     OS << ", plus " << C.NumBaseAdds << " base add"
1501        << (C.NumBaseAdds == 1 ? "" : "s");
1502   if (C.ScaleCost != 0)
1503     OS << ", plus " << C.ScaleCost << " scale cost";
1504   if (C.ImmCost != 0)
1505     OS << ", plus " << C.ImmCost << " imm cost";
1506   if (C.SetupCost != 0)
1507     OS << ", plus " << C.SetupCost << " setup cost";
1508 }
1509 
1510 LLVM_DUMP_METHOD void Cost::dump() const {
1511   print(errs()); errs() << '\n';
1512 }
1513 #endif
1514 
1515 /// Test whether this fixup always uses its value outside of the given loop.
1516 bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
1517   // PHI nodes use their value in their incoming blocks.
1518   if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
1519     for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1520       if (PN->getIncomingValue(i) == OperandValToReplace &&
1521           L->contains(PN->getIncomingBlock(i)))
1522         return false;
1523     return true;
1524   }
1525 
1526   return !L->contains(UserInst);
1527 }
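     // Illustrative: an LCSSA PHI in an exit block is *not* fully outside the
     // loop, because its incoming block is an exiting block inside the loop,
     // and a PHI effectively uses its operand at the end of that incoming
     // block.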
1528 
1529 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1530 void LSRFixup::print(raw_ostream &OS) const {
1531   OS << "UserInst=";
1532   // Store is common and interesting enough to be worth special-casing.
1533   if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
1534     OS << "store ";
1535     Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
1536   } else if (UserInst->getType()->isVoidTy())
1537     OS << UserInst->getOpcodeName();
1538   else
1539     UserInst->printAsOperand(OS, /*PrintType=*/false);
1540 
1541   OS << ", OperandValToReplace=";
1542   OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);
1543 
1544   for (const Loop *PIL : PostIncLoops) {
1545     OS << ", PostIncLoop=";
1546     PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
1547   }
1548 
1549   if (Offset != 0)
1550     OS << ", Offset=" << Offset;
1551 }
1552 
1553 LLVM_DUMP_METHOD void LSRFixup::dump() const {
1554   print(errs()); errs() << '\n';
1555 }
1556 #endif
1557 
1558 /// Test whether this use has a formula with the same registers as the given
1559 /// formula.
1560 bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
1561   SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1562   if (F.ScaledReg) Key.push_back(F.ScaledReg);
1563   // Unstable sort by host order ok, because this is only used for uniquifying.
1564   llvm::sort(Key);
1565   return Uniquifier.count(Key);
1566 }
1567 
1568 /// Return the probability of selecting a formula that does not reference Reg.
1569 float LSRUse::getNotSelectedProbability(const SCEV *Reg) const {
1570   unsigned FNum = 0;
1571   for (const Formula &F : Formulae)
1572     if (F.referencesReg(Reg))
1573       FNum++;
1574   return ((float)(Formulae.size() - FNum)) / Formulae.size();
1575 }
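     // Illustrative: with four formulae of which one references Reg, the
     // probability returned above is (4 - 1) / 4 = 0.75.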
1576 
1577 /// If the given formula has not yet been inserted, add it to the list, and
1578 /// return true. Return false otherwise.  The formula must be in canonical form.
1579 bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
1580   assert(F.isCanonical(L) && "Invalid canonical representation");
1581 
1582   if (!Formulae.empty() && RigidFormula)
1583     return false;
1584 
1585   SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1586   if (F.ScaledReg) Key.push_back(F.ScaledReg);
1587   // Unstable sort by host order ok, because this is only used for uniquifying.
1588   llvm::sort(Key);
1589 
1590   if (!Uniquifier.insert(Key).second)
1591     return false;
1592 
1593   // Using a register to hold the value of 0 is not profitable.
1594   assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
1595          "Zero allocated in a scaled register!");
1596 #ifndef NDEBUG
1597   for (const SCEV *BaseReg : F.BaseRegs)
1598     assert(!BaseReg->isZero() && "Zero allocated in a base register!");
1599 #endif
1600 
1601   // Add the formula to the list.
1602   Formulae.push_back(F);
1603 
1604   // Record registers now being used by this use.
1605   Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
1606   if (F.ScaledReg)
1607     Regs.insert(F.ScaledReg);
1608 
1609   return true;
1610 }
1611 
1612 /// Remove the given formula from this use's list.
1613 void LSRUse::DeleteFormula(Formula &F) {
1614   if (&F != &Formulae.back())
1615     std::swap(F, Formulae.back());
1616   Formulae.pop_back();
1617 }
1618 
1619 /// Recompute the Regs field, and update RegUses.
1620 void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
1621   // Now that we've filtered out some formulae, recompute the Regs set.
1622   SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
1623   Regs.clear();
1624   for (const Formula &F : Formulae) {
1625     if (F.ScaledReg) Regs.insert(F.ScaledReg);
1626     Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
1627   }
1628 
1629   // Update the RegTracker.
1630   for (const SCEV *S : OldRegs)
1631     if (!Regs.count(S))
1632       RegUses.dropRegister(S, LUIdx);
1633 }
1634 
1635 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1636 void LSRUse::print(raw_ostream &OS) const {
1637   OS << "LSR Use: Kind=";
1638   switch (Kind) {
1639   case Basic:    OS << "Basic"; break;
1640   case Special:  OS << "Special"; break;
1641   case ICmpZero: OS << "ICmpZero"; break;
1642   case Address:
1643     OS << "Address of ";
1644     if (AccessTy.MemTy->isPointerTy())
1645       OS << "pointer"; // the full pointer type could be really verbose
1646     else {
1647       OS << *AccessTy.MemTy;
1648     }
1649 
1650     OS << " in addrspace(" << AccessTy.AddrSpace << ')';
1651   }
1652 
1653   OS << ", Offsets={";
1654   bool NeedComma = false;
1655   for (const LSRFixup &Fixup : Fixups) {
1656     if (NeedComma) OS << ',';
1657     OS << Fixup.Offset;
1658     NeedComma = true;
1659   }
1660   OS << '}';
1661 
1662   if (AllFixupsOutsideLoop)
1663     OS << ", all-fixups-outside-loop";
1664 
1665   if (WidestFixupType)
1666     OS << ", widest fixup type: " << *WidestFixupType;
1667 }
1668 
1669 LLVM_DUMP_METHOD void LSRUse::dump() const {
1670   print(errs()); errs() << '\n';
1671 }
1672 #endif
1673 
1674 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1675                                  LSRUse::KindType Kind, MemAccessTy AccessTy,
1676                                  GlobalValue *BaseGV, int64_t BaseOffset,
1677                                  bool HasBaseReg, int64_t Scale,
1678                                  Instruction *Fixup/*= nullptr*/) {
1679   switch (Kind) {
1680   case LSRUse::Address:
1681     return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset,
1682                                      HasBaseReg, Scale, AccessTy.AddrSpace, Fixup);
1683 
1684   case LSRUse::ICmpZero:
1685     // There's not even a target hook for querying whether it would be legal to
1686     // fold a GV into an ICmp.
1687     if (BaseGV)
1688       return false;
1689 
1690     // ICmp only has two operands; don't allow more than two non-trivial parts.
1691     if (Scale != 0 && HasBaseReg && BaseOffset != 0)
1692       return false;
1693 
1694     // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
1695     // putting the scaled register in the other operand of the icmp.
1696     if (Scale != 0 && Scale != -1)
1697       return false;
1698 
1699     // If we have low-level target information, ask the target if it can fold an
1700     // integer immediate on an icmp.
1701     if (BaseOffset != 0) {
1702       // We have one of:
1703       // ICmpZero     BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
1704       // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset
1705       // After the optional negation below, BaseOffset is the ICmp immediate.
1706       if (Scale == 0)
1707         // The cast does the right thing with
1708         // std::numeric_limits<int64_t>::min().
1709         BaseOffset = -(uint64_t)BaseOffset;
1710       return TTI.isLegalICmpImmediate(BaseOffset);
1711     }
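         // Illustrative: for "ICmpZero BaseReg + 5" with Scale == 0, the code
         // above negates to -5 and asks whether "icmp BaseReg, -5" uses a
         // legal immediate on the target.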
1712 
1713     // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
1714     return true;
1715 
1716   case LSRUse::Basic:
1717     // Only handle single-register values.
1718     return !BaseGV && Scale == 0 && BaseOffset == 0;
1719 
1720   case LSRUse::Special:
1721     // Special case Basic to handle -1 scales.
1722     return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
1723   }
1724 
1725   llvm_unreachable("Invalid LSRUse Kind!");
1726 }
1727 
1728 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1729                                  int64_t MinOffset, int64_t MaxOffset,
1730                                  LSRUse::KindType Kind, MemAccessTy AccessTy,
1731                                  GlobalValue *BaseGV, int64_t BaseOffset,
1732                                  bool HasBaseReg, int64_t Scale) {
1733   // Check for overflow.
1734   if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
1735       (MinOffset > 0))
1736     return false;
1737   MinOffset = (uint64_t)BaseOffset + MinOffset;
1738   if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
1739       (MaxOffset > 0))
1740     return false;
1741   MaxOffset = (uint64_t)BaseOffset + MaxOffset;
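       // Illustrative: the two checks above are signed-overflow tests. E.g. with
       // BaseOffset == INT64_MAX and MinOffset == 1, the uint64_t sum wraps to a
       // smaller signed value, the comparisons disagree, and the fold is
       // rejected rather than reasoning about a bogus offset range.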
1742 
1743   return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
1744                               HasBaseReg, Scale) &&
1745          isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
1746                               HasBaseReg, Scale);
1747 }
1748 
1749 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1750                                  int64_t MinOffset, int64_t MaxOffset,
1751                                  LSRUse::KindType Kind, MemAccessTy AccessTy,
1752                                  const Formula &F, const Loop &L) {
1753   // For the purpose of isAMCompletelyFolded, either having a canonical formula
1754   // or a scale not equal to zero is correct.
1755   // Problems may arise from non-canonical formulae having a scale == 0.
1756   // Strictly speaking it would be best to just rely on canonical formulae.
1757   // However, when we generate the scaled formulae, we first check that the
1758   // scaling factor is profitable before computing the actual ScaledReg, for
1759   // the sake of compile time.
1760   assert((F.isCanonical(L) || F.Scale != 0));
1761   return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1762                               F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
1763 }
1764 
1765 /// Test whether we know how to expand the current formula.
1766 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1767                        int64_t MaxOffset, LSRUse::KindType Kind,
1768                        MemAccessTy AccessTy, GlobalValue *BaseGV,
1769                        int64_t BaseOffset, bool HasBaseReg, int64_t Scale) {
1770   // We know how to expand completely foldable formulae.
1771   return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
1772                               BaseOffset, HasBaseReg, Scale) ||
1773          // Or formulae that use a base register produced by a sum of base
1774          // registers.
1775          (Scale == 1 &&
1776           isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1777                                BaseGV, BaseOffset, true, 0));
1778 }
1779 
1780 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1781                        int64_t MaxOffset, LSRUse::KindType Kind,
1782                        MemAccessTy AccessTy, const Formula &F) {
1783   return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
1784                     F.BaseOffset, F.HasBaseReg, F.Scale);
1785 }
1786 
1787 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1788                                  const LSRUse &LU, const Formula &F) {
1789   // Target may want to look at the user instructions.
1790   if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) {
1791     for (const LSRFixup &Fixup : LU.Fixups)
1792       if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
1793                                 (F.BaseOffset + Fixup.Offset), F.HasBaseReg,
1794                                 F.Scale, Fixup.UserInst))
1795         return false;
1796     return true;
1797   }
1798 
1799   return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
1800                               LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
1801                               F.Scale);
1802 }
1803 
1804 static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI,
1805                                             const LSRUse &LU, const Formula &F,
1806                                             const Loop &L) {
1807   if (!F.Scale)
1808     return 0;
1809 
1810   // If the use is not completely folded in that instruction, we will have to
1811   // pay an extra cost only for scale != 1.
1812   if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
1813                             LU.AccessTy, F, L))
1814     return F.Scale != 1;
1815 
1816   switch (LU.Kind) {
1817   case LSRUse::Address: {
1818     // Check the scaling factor cost with both the min and max offsets.
1819     InstructionCost ScaleCostMinOffset = TTI.getScalingFactorCost(
1820         LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg,
1821         F.Scale, LU.AccessTy.AddrSpace);
1822     InstructionCost ScaleCostMaxOffset = TTI.getScalingFactorCost(
1823         LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg,
1824         F.Scale, LU.AccessTy.AddrSpace);
1825 
1826     assert(ScaleCostMinOffset.isValid() && ScaleCostMaxOffset.isValid() &&
1827            "Legal addressing mode has an illegal cost!");
1828     return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);
1829   }
1830   case LSRUse::ICmpZero:
1831   case LSRUse::Basic:
1832   case LSRUse::Special:
1833     // The use is completely folded, i.e., everything is folded into the
1834     // instruction.
1835     return 0;
1836   }
1837 
1838   llvm_unreachable("Invalid LSRUse Kind!");
1839 }
1840 
1841 static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
1842                              LSRUse::KindType Kind, MemAccessTy AccessTy,
1843                              GlobalValue *BaseGV, int64_t BaseOffset,
1844                              bool HasBaseReg) {
1845   // Fast-path: zero is always foldable.
1846   if (BaseOffset == 0 && !BaseGV) return true;
1847 
1848   // Conservatively, create an address with an immediate and a
1849   // base and a scale.
1850   int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
1851 
1852   // Canonicalize a scale of 1 to a base register if the formula doesn't
1853   // already have a base register.
1854   if (!HasBaseReg && Scale == 1) {
1855     Scale = 0;
1856     HasBaseReg = true;
1857   }
1858 
1859   return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset,
1860                               HasBaseReg, Scale);
1861 }
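     // Illustrative: an Address use with BaseGV == null and BaseOffset == 16
     // that already has a base register is "always foldable" only if the
     // conservative mode [reg + 1*reg + 16] is legal; with no base register the
     // query degrades to the simpler [reg + 16].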
1862 
1863 static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
1864                              ScalarEvolution &SE, int64_t MinOffset,
1865                              int64_t MaxOffset, LSRUse::KindType Kind,
1866                              MemAccessTy AccessTy, const SCEV *S,
1867                              bool HasBaseReg) {
1868   // Fast-path: zero is always foldable.
1869   if (S->isZero()) return true;
1870 
1871   // Conservatively, create an address with an immediate and a
1872   // base and a scale.
1873   int64_t BaseOffset = ExtractImmediate(S, SE);
1874   GlobalValue *BaseGV = ExtractSymbol(S, SE);
1875 
1876   // If there's anything else involved, it's not foldable.
1877   if (!S->isZero()) return false;
1878 
1879   // Fast-path: zero is always foldable.
1880   if (BaseOffset == 0 && !BaseGV) return true;
1881 
1882   // Conservatively, create an address with an immediate and a
1883   // base and a scale.
1884   int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
1885 
1886   return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
1887                               BaseOffset, HasBaseReg, Scale);
1888 }
1889 
1890 namespace {
1891 
1892 /// An individual increment in a Chain of IV increments.  Relate an IV user to
1893 /// an expression that computes the IV it uses from the IV used by the previous
1894 /// link in the Chain.
1895 ///
1896 /// For the head of a chain, IncExpr holds the absolute SCEV expression for the
1897 /// original IVOperand. The head of the chain's IVOperand is only valid during
1898 /// chain collection, before LSR replaces IV users. During chain generation,
1899 /// IncExpr can be used to find the new IVOperand that computes the same
1900 /// expression.
1901 struct IVInc {
1902   Instruction *UserInst;
1903   Value* IVOperand;
1904   const SCEV *IncExpr;
1905 
1906   IVInc(Instruction *U, Value *O, const SCEV *E)
1907       : UserInst(U), IVOperand(O), IncExpr(E) {}
1908 };
1909 
1910 // The list of IV increments in program order.  We typically add the head of a
1911 // chain without finding subsequent links.
1912 struct IVChain {
1913   SmallVector<IVInc, 1> Incs;
1914   const SCEV *ExprBase = nullptr;
1915 
1916   IVChain() = default;
1917   IVChain(const IVInc &Head, const SCEV *Base)
1918       : Incs(1, Head), ExprBase(Base) {}
1919 
1920   using const_iterator = SmallVectorImpl<IVInc>::const_iterator;
1921 
1922   // Return the first increment in the chain.
1923   const_iterator begin() const {
1924     assert(!Incs.empty());
1925     return std::next(Incs.begin());
1926   }
1927   const_iterator end() const {
1928     return Incs.end();
1929   }
1930 
1931   // Returns true if this chain contains any increments.
1932   bool hasIncs() const { return Incs.size() >= 2; }
1933 
1934   // Add an IVInc to the end of this chain.
1935   void add(const IVInc &X) { Incs.push_back(X); }
1936 
1937   // Returns the last UserInst in the chain.
1938   Instruction *tailUserInst() const { return Incs.back().UserInst; }
1939 
1940   // Returns true if IncExpr can be profitably added to this chain.
1941   bool isProfitableIncrement(const SCEV *OperExpr,
1942                              const SCEV *IncExpr,
1943                              ScalarEvolution&);
1944 };
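     // Illustrative IR sketch (names hypothetical): for
     //   %p   = phi ptr [ %base, %preheader ], [ %p.2, %latch ]
     //   %p.1 = getelementptr i8, ptr %p, i64 4
     //   %p.2 = getelementptr i8, ptr %p.1, i64 4
     // the head records %p's user, and each later link records one GEP with an
     // IncExpr of +4; hasIncs() is then true and tailUserInst() is the %p.2 GEP.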
1945 
1946 /// Helper for CollectChains to track multiple IV increment uses.  Distinguish
1947 /// between FarUsers that definitely cross IV increments and NearUsers that may
1948 /// be used between IV increments.
1949 struct ChainUsers {
1950   SmallPtrSet<Instruction*, 4> FarUsers;
1951   SmallPtrSet<Instruction*, 4> NearUsers;
1952 };
1953 
1954 /// This class holds state for the main loop strength reduction logic.
1955 class LSRInstance {
1956   IVUsers &IU;
1957   ScalarEvolution &SE;
1958   DominatorTree &DT;
1959   LoopInfo &LI;
1960   AssumptionCache &AC;
1961   TargetLibraryInfo &TLI;
1962   const TargetTransformInfo &TTI;
1963   Loop *const L;
1964   MemorySSAUpdater *MSSAU;
1965   TTI::AddressingModeKind AMK;
1966   mutable SCEVExpander Rewriter;
1967   bool Changed = false;
1968 
1969   /// This is the insert position at which the current loop's induction
1970   /// variable increment should be placed. In simple loops, this is the latch
1971   /// terminator. But in more complicated cases, this is a position which will
1972   /// dominate all the in-loop post-increment users.
1973   Instruction *IVIncInsertPos = nullptr;
1974 
1975   /// Interesting factors between use strides.
1976   ///
1977   /// We explicitly use a SetVector which contains a SmallSet, instead of the
1978   /// default, a SmallDenseSet, because we need to use the full range of
1979   /// int64_ts, and there's currently no good way of doing that with
1980   /// SmallDenseSet.
1981   SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors;
1982 
1983   /// The cost of the current SCEV; the best solution found by LSR will be
1984   /// dropped if it is not profitable.
1985   Cost BaselineCost;
1986 
1987   /// Interesting use types, to facilitate truncation reuse.
1988   SmallSetVector<Type *, 4> Types;
1989 
1990   /// The list of interesting uses.
1991   mutable SmallVector<LSRUse, 16> Uses;
1992 
1993   /// Track which uses use which register candidates.
1994   RegUseTracker RegUses;
1995 
1996   // Limit the number of chains to avoid quadratic behavior. We don't expect to
1997   // have more than a few IV increment chains in a loop. Missing a Chain falls
1998   // back to normal LSR behavior for those uses.
1999   static const unsigned MaxChains = 8;
2000 
2001   /// IV users can form a chain of IV increments.
2002   SmallVector<IVChain, MaxChains> IVChainVec;
2003 
2004   /// IV users that belong to profitable IVChains.
2005   SmallPtrSet<Use*, MaxChains> IVIncSet;
2006 
2007   /// Induction variables that were generated and inserted by the SCEV Expander.
2008   SmallVector<llvm::WeakVH, 2> ScalarEvolutionIVs;
2009 
2010   void OptimizeShadowIV();
2011   bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
2012   ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
2013   void OptimizeLoopTermCond();
2014 
2015   void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
2016                         SmallVectorImpl<ChainUsers> &ChainUsersVec);
2017   void FinalizeChain(IVChain &Chain);
2018   void CollectChains();
2019   void GenerateIVChain(const IVChain &Chain,
2020                        SmallVectorImpl<WeakTrackingVH> &DeadInsts);
2021 
2022   void CollectInterestingTypesAndFactors();
2023   void CollectFixupsAndInitialFormulae();
2024 
2025   // Support for sharing of LSRUses between LSRFixups.
2026   using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>;
2027   UseMapTy UseMap;
2028 
2029   bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
2030                           LSRUse::KindType Kind, MemAccessTy AccessTy);
2031 
2032   std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind,
2033                                     MemAccessTy AccessTy);
2034 
2035   void DeleteUse(LSRUse &LU, size_t LUIdx);
2036 
2037   LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);
2038 
2039   void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
2040   void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
2041   void CountRegisters(const Formula &F, size_t LUIdx);
2042   bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);
2043 
2044   void CollectLoopInvariantFixupsAndFormulae();
2045 
2046   void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
2047                               unsigned Depth = 0);
2048 
2049   void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
2050                                   const Formula &Base, unsigned Depth,
2051                                   size_t Idx, bool IsScaledReg = false);
2052   void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
2053   void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
2054                                    const Formula &Base, size_t Idx,
2055                                    bool IsScaledReg = false);
2056   void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
2057   void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
2058                                    const Formula &Base,
2059                                    const SmallVectorImpl<int64_t> &Worklist,
2060                                    size_t Idx, bool IsScaledReg = false);
2061   void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
2062   void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
2063   void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
2064   void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
2065   void GenerateCrossUseConstantOffsets();
2066   void GenerateAllReuseFormulae();
2067 
2068   void FilterOutUndesirableDedicatedRegisters();
2069 
2070   size_t EstimateSearchSpaceComplexity() const;
2071   void NarrowSearchSpaceByDetectingSupersets();
2072   void NarrowSearchSpaceByCollapsingUnrolledCode();
2073   void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
2074   void NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
2075   void NarrowSearchSpaceByFilterPostInc();
2076   void NarrowSearchSpaceByDeletingCostlyFormulas();
2077   void NarrowSearchSpaceByPickingWinnerRegs();
2078   void NarrowSearchSpaceUsingHeuristics();
2079 
2080   void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
2081                     Cost &SolutionCost,
2082                     SmallVectorImpl<const Formula *> &Workspace,
2083                     const Cost &CurCost,
2084                     const SmallPtrSet<const SCEV *, 16> &CurRegs,
2085                     DenseSet<const SCEV *> &VisitedRegs) const;
2086   void Solve(SmallVectorImpl<const Formula *> &Solution) const;
2087 
2088   BasicBlock::iterator
2089   HoistInsertPosition(BasicBlock::iterator IP,
2090                       const SmallVectorImpl<Instruction *> &Inputs) const;
2091   BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP,
2092                                                      const LSRFixup &LF,
2093                                                      const LSRUse &LU) const;
2094 
2095   Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2096                 BasicBlock::iterator IP,
2097                 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2098   void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
2099                      const Formula &F,
2100                      SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2101   void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2102                SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2103   void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);
2104 
2105 public:
2106   LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT,
2107               LoopInfo &LI, const TargetTransformInfo &TTI, AssumptionCache &AC,
2108               TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU);
2109 
2110   bool getChanged() const { return Changed; }
2111   const SmallVectorImpl<WeakVH> &getScalarEvolutionIVs() const {
2112     return ScalarEvolutionIVs;
2113   }
2114 
2115   void print_factors_and_types(raw_ostream &OS) const;
2116   void print_fixups(raw_ostream &OS) const;
2117   void print_uses(raw_ostream &OS) const;
2118   void print(raw_ostream &OS) const;
2119   void dump() const;
2120 };
2121 
2122 } // end anonymous namespace
2123 
2124 /// If IV is used in an int-to-float cast inside the loop then try to eliminate
2125 /// the cast operation.
2126 void LSRInstance::OptimizeShadowIV() {
2127   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2128   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2129     return;
2130 
2131   for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
2132        UI != E; /* empty */) {
2133     IVUsers::const_iterator CandidateUI = UI;
2134     ++UI;
2135     Instruction *ShadowUse = CandidateUI->getUser();
2136     Type *DestTy = nullptr;
2137     bool IsSigned = false;
2138 
2139     /* If shadow use is an int->float cast then insert a second IV
2140        to eliminate this cast.
2141 
2142          for (unsigned i = 0; i < n; ++i)
2143            foo((double)i);
2144 
2145        is transformed into
2146 
2147          double d = 0.0;
2148          for (unsigned i = 0; i < n; ++i, ++d)
2149            foo(d);
2150     */
2151     if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
2152       IsSigned = false;
2153       DestTy = UCast->getDestTy();
2154     }
2155     else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
2156       IsSigned = true;
2157       DestTy = SCast->getDestTy();
2158     }
2159     if (!DestTy) continue;
2160 
2161     // If the target does not support DestTy natively then do not apply
2162     // this transformation.
2163     if (!TTI.isTypeLegal(DestTy)) continue;
2164 
2165     PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2166     if (!PH) continue;
2167     if (PH->getNumIncomingValues() != 2) continue;
2168 
2169     // If the calculation in integers overflows, the result in FP type will
2170     // differ. So we can only do this transformation if we are guaranteed not
2171     // to deal with overflowing values.
2172     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH));
2173     if (!AR) continue;
2174     if (IsSigned && !AR->hasNoSignedWrap()) continue;
2175     if (!IsSigned && !AR->hasNoUnsignedWrap()) continue;
2176 
2177     Type *SrcTy = PH->getType();
2178     int Mantissa = DestTy->getFPMantissaWidth();
2179     if (Mantissa == -1) continue;
2180     if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
2181       continue;
2182 
2183     unsigned Entry, Latch;
2184     if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2185       Entry = 0;
2186       Latch = 1;
2187     } else {
2188       Entry = 1;
2189       Latch = 0;
2190     }
2191 
2192     ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2193     if (!Init) continue;
2194     Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
2195                                         (double)Init->getSExtValue() :
2196                                         (double)Init->getZExtValue());
2197 
2198     BinaryOperator *Incr =
2199       dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2200     if (!Incr) continue;
2201     if (Incr->getOpcode() != Instruction::Add
2202         && Incr->getOpcode() != Instruction::Sub)
2203       continue;
2204 
2205     /* Initialize new IV, double d = 0.0 in above example. */
2206     ConstantInt *C = nullptr;
2207     if (Incr->getOperand(0) == PH)
2208       C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2209     else if (Incr->getOperand(1) == PH)
2210       C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2211     else
2212       continue;
2213 
2214     if (!C) continue;
2215 
2216     // Ignore negative constants, as the code below doesn't handle them
2217     // correctly. TODO: Remove this restriction.
2218     if (!C->getValue().isStrictlyPositive()) continue;
2219 
2220     /* Add new PHINode. */
2221     PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);
2222 
2223     /* create new increment. '++d' in above example. */
2224     Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2225     BinaryOperator *NewIncr =
2226       BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2227                                Instruction::FAdd : Instruction::FSub,
2228                              NewPH, CFP, "IV.S.next.", Incr);
2229 
2230     NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2231     NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2232 
2233     /* Remove cast operation */
2234     ShadowUse->replaceAllUsesWith(NewPH);
2235     ShadowUse->eraseFromParent();
2236     Changed = true;
2237     break;
2238   }
2239 }
2240 
2241 /// If Cond has an operand that is an expression of an IV, set the IV user and
2242 /// stride information and return true, otherwise return false.
2243 bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
2244   for (IVStrideUse &U : IU)
2245     if (U.getUser() == Cond) {
2246       // NOTE: we could handle setcc instructions with multiple uses here, but
2247       // InstCombine does it as well for simple uses, and it's not clear that
2248       // it occurs enough in real life to be worth handling.
2249       CondUse = &U;
2250       return true;
2251     }
2252   return false;
2253 }
2254 
2255 /// Rewrite the loop's terminating condition if it uses a max computation.
2256 ///
2257 /// This is a narrow solution to a specific, but acute, problem. For loops
2258 /// like this:
2259 ///
2260 ///   i = 0;
2261 ///   do {
2262 ///     p[i] = 0.0;
2263 ///   } while (++i < n);
2264 ///
2265 /// the trip count isn't just 'n', because 'n' might not be positive. And
2266 /// unfortunately this can come up even for loops where the user didn't use
2267 /// a C do-while loop. For example, seemingly well-behaved top-test loops
2268 /// will commonly be lowered like this:
2269 ///
2270 ///   if (n > 0) {
2271 ///     i = 0;
2272 ///     do {
2273 ///       p[i] = 0.0;
2274 ///     } while (++i < n);
2275 ///   }
2276 ///
2277 /// and then it's possible for subsequent optimization to obscure the if
2278 /// test in such a way that indvars can't find it.
2279 ///
2280 /// When indvars can't find the if test in loops like this, it creates a
2281 /// max expression, which allows it to give the loop a canonical
2282 /// induction variable:
2283 ///
2284 ///   i = 0;
2285 ///   max = n < 1 ? 1 : n;
2286 ///   do {
2287 ///     p[i] = 0.0;
2288 ///   } while (++i != max);
2289 ///
2290 /// Canonical induction variables are necessary because the loop passes
2291 /// are designed around them. The most obvious example of this is the
2292 /// LoopInfo analysis, which doesn't remember trip count values. It
2293 /// expects to be able to rediscover the trip count each time it is
2294 /// needed, and it does this using a simple analysis that only succeeds if
2295 /// the loop has a canonical induction variable.
2296 ///
2297 /// However, when it comes time to generate code, the maximum operation
2298 /// can be quite costly, especially if it's inside of an outer loop.
2299 ///
2300 /// This function solves this problem by detecting loops of this type and
2301 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
2302 /// the instructions for the maximum computation.
2303 ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
2304   // Check that the loop matches the pattern we're looking for.
2305   if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2306       Cond->getPredicate() != CmpInst::ICMP_NE)
2307     return Cond;
2308 
2309   SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2310   if (!Sel || !Sel->hasOneUse()) return Cond;
2311 
2312   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2313   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2314     return Cond;
2315   const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);
2316 
2317   // Add one to the backedge-taken count to get the trip count.
2318   const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
2319   if (IterationCount != SE.getSCEV(Sel)) return Cond;
2320 
2321   // Check for a max calculation that matches the pattern. There's no check
2322   // for ICMP_ULE here because the comparison would be with zero, which
2323   // isn't interesting.
2324   CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
2325   const SCEVNAryExpr *Max = nullptr;
2326   if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
2327     Pred = ICmpInst::ICMP_SLE;
2328     Max = S;
2329   } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
2330     Pred = ICmpInst::ICMP_SLT;
2331     Max = S;
2332   } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
2333     Pred = ICmpInst::ICMP_ULT;
2334     Max = U;
2335   } else {
2336     // No match; bail.
2337     return Cond;
2338   }
2339 
2340   // To handle a max with more than two operands, this optimization would
2341   // require additional checking and setup.
2342   if (Max->getNumOperands() != 2)
2343     return Cond;
2344 
2345   const SCEV *MaxLHS = Max->getOperand(0);
2346   const SCEV *MaxRHS = Max->getOperand(1);
2347 
2348   // ScalarEvolution canonicalizes constants to the left. For < and >, look
2349   // for a comparison with 1. For <= and >=, a comparison with zero.
2350   if (!MaxLHS ||
2351       (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
2352     return Cond;
2353 
2354   // Check the relevant induction variable for conformance to
2355   // the pattern.
2356   const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
2357   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2358   if (!AR || !AR->isAffine() ||
2359       AR->getStart() != One ||
2360       AR->getStepRecurrence(SE) != One)
2361     return Cond;
2362 
2363   assert(AR->getLoop() == L &&
2364          "Loop condition operand is an addrec in a different loop!");
2365 
2366   // Check the right operand of the select, and remember it, as it will
2367   // be used in the new comparison instruction.
2368   Value *NewRHS = nullptr;
2369   if (ICmpInst::isTrueWhenEqual(Pred)) {
2370     // Look for n+1, and grab n.
2371     if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
2372       if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
2373          if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
2374            NewRHS = BO->getOperand(0);
2375     if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
2376       if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
2377         if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
2378           NewRHS = BO->getOperand(0);
2379     if (!NewRHS)
2380       return Cond;
2381   } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
2382     NewRHS = Sel->getOperand(1);
2383   else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
2384     NewRHS = Sel->getOperand(2);
2385   else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
2386     NewRHS = SU->getValue();
2387   else
2388     // Max doesn't match expected pattern.
2389     return Cond;
2390 
2391   // Determine the new comparison opcode. It may be signed or unsigned,
2392   // and the original comparison may be either equality or inequality.
2393   if (Cond->getPredicate() == CmpInst::ICMP_EQ)
2394     Pred = CmpInst::getInversePredicate(Pred);
2395 
2396   // Ok, everything looks ok to change the condition into an SLT or SGE and
2397   // delete the max calculation.
2398   ICmpInst *NewCond =
2399     new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
2400 
2401   // Delete the max calculation instructions.
2402   NewCond->setDebugLoc(Cond->getDebugLoc());
2403   Cond->replaceAllUsesWith(NewCond);
2404   CondUse->setUser(NewCond);
2405   Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2406   Cond->eraseFromParent();
2407   Sel->eraseFromParent();
2408   if (Cmp->use_empty())
2409     Cmp->eraseFromParent();
2410   return NewCond;
2411 }
2412 
2413 /// Change loop terminating condition to use the postinc iv when possible.
2414 void
2415 LSRInstance::OptimizeLoopTermCond() {
2416   SmallPtrSet<Instruction *, 4> PostIncs;
2417 
2418   // We need a different set of heuristics for rotated and non-rotated loops.
2419   // If a loop is rotated then the latch is also the backedge, so inserting
2420   // post-inc expressions just before the latch is ideal. To reduce live ranges
2421   // it also makes sense to rewrite terminating conditions to use post-inc
2422   // expressions.
2423   //
2424   // If the loop is not rotated then the latch is not a backedge; the latch
2425   // check is done in the loop head. Adding post-inc expressions before the
2426   // latch will cause overlapping live-ranges of pre-inc and post-inc expressions
2427   // in the loop body. In this case we do *not* want to use post-inc expressions
2428   // in the latch check, and we want to insert post-inc expressions before
2429   // the backedge.
2430   BasicBlock *LatchBlock = L->getLoopLatch();
2431   SmallVector<BasicBlock*, 8> ExitingBlocks;
2432   L->getExitingBlocks(ExitingBlocks);
2433   if (!llvm::is_contained(ExitingBlocks, LatchBlock)) {
2434     // The backedge doesn't exit the loop; treat this as a head-tested loop.
2435     IVIncInsertPos = LatchBlock->getTerminator();
2436     return;
2437   }
2438 
2439   // Otherwise treat this as a rotated loop.
2440   for (BasicBlock *ExitingBlock : ExitingBlocks) {
2441     // Get the terminating condition for the loop if possible.  If we
2442     // can, we want to change it to use a post-incremented version of its
2443     // induction variable, to allow coalescing the live ranges for the IV into
2444     // one register value.
2445 
2446     BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2447     if (!TermBr)
2448       continue;
2449     // FIXME: Overly conservative; the termination condition could be an 'or', etc.
2450     if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
2451       continue;
2452 
2453     // Search IVUsesByStride to find Cond's IVUse if there is one.
2454     IVStrideUse *CondUse = nullptr;
2455     ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2456     if (!FindIVUserForCond(Cond, CondUse))
2457       continue;
2458 
2459     // If the trip count is computed in terms of a max (due to ScalarEvolution
2460     // being unable to find a sufficient guard, for example), change the loop
2461     // comparison to use SLT or ULT instead of NE.
2462     // One consequence of doing this now is that it disrupts the count-down
2463     // optimization. That's not always a bad thing though, because in such
2464     // cases it may still be worthwhile to avoid a max.
2465     Cond = OptimizeMax(Cond, CondUse);
2466 
2467     // If this exiting block dominates the latch block, it may also use
2468     // the post-inc value if it won't be shared with other uses.
2469     // Check for dominance.
2470     if (!DT.dominates(ExitingBlock, LatchBlock))
2471       continue;
2472 
2473     // Conservatively avoid trying to use the post-inc value in non-latch
2474     // exits if there may be pre-inc users in intervening blocks.
2475     if (LatchBlock != ExitingBlock)
2476       for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
2477         // Test if the use is reachable from the exiting block. This dominator
2478         // query is a conservative approximation of reachability.
2479         if (&*UI != CondUse &&
2480             !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
2481           // Conservatively assume there may be reuse if the quotient of their
2482           // strides could be a legal scale.
2483           const SCEV *A = IU.getStride(*CondUse, L);
2484           const SCEV *B = IU.getStride(*UI, L);
2485           if (!A || !B) continue;
2486           if (SE.getTypeSizeInBits(A->getType()) !=
2487               SE.getTypeSizeInBits(B->getType())) {
2488             if (SE.getTypeSizeInBits(A->getType()) >
2489                 SE.getTypeSizeInBits(B->getType()))
2490               B = SE.getSignExtendExpr(B, A->getType());
2491             else
2492               A = SE.getSignExtendExpr(A, B->getType());
2493           }
2494           if (const SCEVConstant *D =
2495                 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
2496             const ConstantInt *C = D->getValue();
2497             // Stride of one or negative one can have reuse with non-addresses.
2498             if (C->isOne() || C->isMinusOne())
2499               goto decline_post_inc;
2500             // Avoid weird situations.
2501             if (C->getValue().getMinSignedBits() >= 64 ||
2502                 C->getValue().isMinSignedValue())
2503               goto decline_post_inc;
2504             // Check for possible scaled-address reuse.
2505             if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) {
2506               MemAccessTy AccessTy = getAccessType(
2507                   TTI, UI->getUser(), UI->getOperandValToReplace());
2508               int64_t Scale = C->getSExtValue();
2509               if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
2510                                             /*BaseOffset=*/0,
2511                                             /*HasBaseReg=*/false, Scale,
2512                                             AccessTy.AddrSpace))
2513                 goto decline_post_inc;
2514               Scale = -Scale;
2515               if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
2516                                             /*BaseOffset=*/0,
2517                                             /*HasBaseReg=*/false, Scale,
2518                                             AccessTy.AddrSpace))
2519                 goto decline_post_inc;
2520             }
2521           }
2522         }
2523 
2524     LLVM_DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
2525                       << *Cond << '\n');
2526 
2527     // It's possible for the setcc instruction to be anywhere in the loop, and
2528     // possible for it to have multiple users.  If it is not immediately before
2529     // the exiting block branch, move it.
2530     if (Cond->getNextNonDebugInstruction() != TermBr) {
2531       if (Cond->hasOneUse()) {
2532         Cond->moveBefore(TermBr);
2533       } else {
2534         // Clone the terminating condition and insert into the loopend.
2535         ICmpInst *OldCond = Cond;
2536         Cond = cast<ICmpInst>(Cond->clone());
2537         Cond->setName(L->getHeader()->getName() + ".termcond");
2538         Cond->insertInto(ExitingBlock, TermBr->getIterator());
2539 
2540         // Clone the IVUse, as the old use still exists!
2541         CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
2542         TermBr->replaceUsesOfWith(OldCond, Cond);
2543       }
2544     }
2545 
2546     // If we get to here, we know that we can transform the setcc instruction to
2547     // use the post-incremented version of the IV, allowing us to coalesce the
2548     // live ranges for the IV correctly.
2549     CondUse->transformToPostInc(L);
2550     Changed = true;
2551 
2552     PostIncs.insert(Cond);
2553   decline_post_inc:;
2554   }
2555 
2556   // Determine an insertion point for the loop induction variable increment. It
2557   // must dominate all the post-inc comparisons we just set up, and it must
2558   // dominate the loop latch edge.
2559   IVIncInsertPos = L->getLoopLatch()->getTerminator();
2560   for (Instruction *Inst : PostIncs)
2561     IVIncInsertPos = DT.findNearestCommonDominator(IVIncInsertPos, Inst);
2562 }
2563 
2564 /// Determine if the given use can accommodate a fixup at the given offset and
2565 /// other details. If so, update the use and return true.
2566 bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
2567                                      bool HasBaseReg, LSRUse::KindType Kind,
2568                                      MemAccessTy AccessTy) {
2569   int64_t NewMinOffset = LU.MinOffset;
2570   int64_t NewMaxOffset = LU.MaxOffset;
2571   MemAccessTy NewAccessTy = AccessTy;
2572 
2573   // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
2574   // something conservative; however, this can pessimize the case in which one
2575   // of the uses has all of its uses outside the loop, for example.
2576   if (LU.Kind != Kind)
2577     return false;
2578 
2579   // Check for a mismatched access type, and fall back conservatively as needed.
2580   // TODO: Be less conservative when the type is similar and can use the same
2581   // addressing modes.
2582   if (Kind == LSRUse::Address) {
2583     if (AccessTy.MemTy != LU.AccessTy.MemTy) {
2584       NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(),
2585                                             AccessTy.AddrSpace);
2586     }
2587   }
2588 
2589   // Conservatively assume HasBaseReg is true for now.
2590   if (NewOffset < LU.MinOffset) {
2591     if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2592                           LU.MaxOffset - NewOffset, HasBaseReg))
2593       return false;
2594     NewMinOffset = NewOffset;
2595   } else if (NewOffset > LU.MaxOffset) {
2596     if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2597                           NewOffset - LU.MinOffset, HasBaseReg))
2598       return false;
2599     NewMaxOffset = NewOffset;
2600   }
2601 
2602   // Update the use.
2603   LU.MinOffset = NewMinOffset;
2604   LU.MaxOffset = NewMaxOffset;
2605   LU.AccessTy = NewAccessTy;
2606   return true;
2607 }
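     // Illustrative (numbers hypothetical): if LU currently spans offsets
     // [0, 24] and a new fixup needs offset -8, the range only widens to
     // [-8, 24] when an offset of LU.MaxOffset - NewOffset == 32 is still
     // foldable for this kind of use.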
2608 
2609 /// Return an LSRUse index and an offset value for a fixup which needs the given
2610 /// expression, with the given kind and optional access type.  Either reuse an
2611 /// existing use or create a new one, as needed.
2612 std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr,
2613                                                LSRUse::KindType Kind,
2614                                                MemAccessTy AccessTy) {
2615   const SCEV *Copy = Expr;
2616   int64_t Offset = ExtractImmediate(Expr, SE);
2617 
2618   // Basic uses can't accept any offset, for example.
2619   if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
2620                         Offset, /*HasBaseReg=*/ true)) {
2621     Expr = Copy;
2622     Offset = 0;
2623   }
2624 
2625   std::pair<UseMapTy::iterator, bool> P =
2626     UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
2627   if (!P.second) {
2628     // A use already existed with this base.
2629     size_t LUIdx = P.first->second;
2630     LSRUse &LU = Uses[LUIdx];
2631     if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
2632       // Reuse this use.
2633       return std::make_pair(LUIdx, Offset);
2634   }
2635 
2636   // Create a new use.
2637   size_t LUIdx = Uses.size();
2638   P.first->second = LUIdx;
2639   Uses.push_back(LSRUse(Kind, AccessTy));
2640   LSRUse &LU = Uses[LUIdx];
2641 
2642   LU.MinOffset = Offset;
2643   LU.MaxOffset = Offset;
2644   return std::make_pair(LUIdx, Offset);
2645 }
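     // Illustrative (names hypothetical): for Expr == {(4 + %base),+,8},
     // ExtractImmediate peels off 4, so fixups whose expressions differ only in
     // that constant can share one LSRUse keyed on {%base,+,8}, each with its
     // own Offset, provided every offset stays foldable for the use's kind.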
2646 
2647 /// Delete the given use from the Uses list.
2648 void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
2649   if (&LU != &Uses.back())
2650     std::swap(LU, Uses.back());
2651   Uses.pop_back();
2652 
2653   // Update RegUses.
2654   RegUses.swapAndDropUse(LUIdx, Uses.size());
2655 }
2656 
2657 /// Look for a use distinct from OrigLU which has a formula with the same
2658 /// registers as the given formula.
2659 LSRUse *
2660 LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
2661                                        const LSRUse &OrigLU) {
2662   // Search all uses for the formula. This could be more clever.
2663   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2664     LSRUse &LU = Uses[LUIdx];
2665     // Check whether this use is close enough to OrigLU, to see whether it's
2666     // worthwhile looking through its formulae.
2667     // Ignore ICmpZero uses because they may contain formulae generated by
2668     // GenerateICmpZeroScales, in which case adding fixup offsets may
2669     // be invalid.
2670     if (&LU != &OrigLU &&
2671         LU.Kind != LSRUse::ICmpZero &&
2672         LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
2673         LU.WidestFixupType == OrigLU.WidestFixupType &&
2674         LU.HasFormulaWithSameRegs(OrigF)) {
2675       // Scan through this use's formulae.
2676       for (const Formula &F : LU.Formulae) {
2677         // Check to see if this formula has the same registers and symbols
2678         // as OrigF.
2679         if (F.BaseRegs == OrigF.BaseRegs &&
2680             F.ScaledReg == OrigF.ScaledReg &&
2681             F.BaseGV == OrigF.BaseGV &&
2682             F.Scale == OrigF.Scale &&
2683             F.UnfoldedOffset == OrigF.UnfoldedOffset) {
2684           if (F.BaseOffset == 0)
2685             return &LU;
2686           // This is the formula where all the registers and symbols matched;
2687           // there aren't going to be any others. Since we declined it, we
2688           // can skip the rest of the formulae and proceed to the next LSRUse.
2689           break;
2690         }
2691       }
2692     }
2693   }
2694 
2695   // Nothing looked good.
2696   return nullptr;
2697 }
2698 
2699 void LSRInstance::CollectInterestingTypesAndFactors() {
2700   SmallSetVector<const SCEV *, 4> Strides;
2701 
2702   // Collect interesting types and strides.
2703   SmallVector<const SCEV *, 4> Worklist;
2704   for (const IVStrideUse &U : IU) {
2705     const SCEV *Expr = IU.getExpr(U);
2706 
2707     // Collect interesting types.
2708     Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
2709 
2710     // Add strides for mentioned loops.
2711     Worklist.push_back(Expr);
2712     do {
2713       const SCEV *S = Worklist.pop_back_val();
2714       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2715         if (AR->getLoop() == L)
2716           Strides.insert(AR->getStepRecurrence(SE));
2717         Worklist.push_back(AR->getStart());
2718       } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2719         append_range(Worklist, Add->operands());
2720       }
2721     } while (!Worklist.empty());
2722   }
2723 
2724   // Compute interesting factors from the set of interesting strides.
2725   for (SmallSetVector<const SCEV *, 4>::const_iterator
2726        I = Strides.begin(), E = Strides.end(); I != E; ++I)
2727     for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
2728          std::next(I); NewStrideIter != E; ++NewStrideIter) {
2729       const SCEV *OldStride = *I;
2730       const SCEV *NewStride = *NewStrideIter;
2731 
2732       if (SE.getTypeSizeInBits(OldStride->getType()) !=
2733           SE.getTypeSizeInBits(NewStride->getType())) {
2734         if (SE.getTypeSizeInBits(OldStride->getType()) >
2735             SE.getTypeSizeInBits(NewStride->getType()))
2736           NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
2737         else
2738           OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
2739       }
2740       if (const SCEVConstant *Factor =
2741             dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
2742                                                         SE, true))) {
2743         if (Factor->getAPInt().getMinSignedBits() <= 64 && !Factor->isZero())
2744           Factors.insert(Factor->getAPInt().getSExtValue());
2745       } else if (const SCEVConstant *Factor =
2746                    dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
2747                                                                NewStride,
2748                                                                SE, true))) {
2749         if (Factor->getAPInt().getMinSignedBits() <= 64 && !Factor->isZero())
2750           Factors.insert(Factor->getAPInt().getSExtValue());
2751       }
2752     }
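
       // Illustrative example: interesting strides 4 and 8 yield the factor 2,
       // since 8 divides exactly by 4; the inexact division in the other
       // direction (4 by 8) contributes nothing.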
2753 
2754   // If all uses use the same type, don't bother looking for truncation-based
2755   // reuse.
2756   if (Types.size() == 1)
2757     Types.clear();
2758 
2759   LLVM_DEBUG(print_factors_and_types(dbgs()));
2760 }
2761 
2762 /// Helper for CollectChains that finds an IV operand (computed by an AddRec in
2763 /// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to
2764 /// IVStrideUses, we could partially skip this.
2765 static User::op_iterator
2766 findIVOperand(User::op_iterator OI, User::op_iterator OE,
2767               Loop *L, ScalarEvolution &SE) {
2768   for (; OI != OE; ++OI) {
2769     if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
2770       if (!SE.isSCEVable(Oper->getType()))
2771         continue;
2772 
2773       if (const SCEVAddRecExpr *AR =
2774           dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
2775         if (AR->getLoop() == L)
2776           break;
2777       }
2778     }
2779   }
2780   return OI;
2781 }
2782 
2783 /// IVChain logic must consistently peek through base TruncInst operands, so
2784 /// wrap it in a convenient helper.
2785 static Value *getWideOperand(Value *Oper) {
2786   if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
2787     return Trunc->getOperand(0);
2788   return Oper;
2789 }
2790 
2791 /// Return true if we allow an IV chain to include both types.
2792 static bool isCompatibleIVType(Value *LVal, Value *RVal) {
2793   Type *LType = LVal->getType();
2794   Type *RType = RVal->getType();
2795   return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy() &&
2796                               // Different address spaces mean (possibly)
2797                               // different pointer implementations, e.g.
2798                               // i16 vs. i32, so disallow that.
2799                               (LType->getPointerAddressSpace() ==
2800                                RType->getPointerAddressSpace()));
2801 }
2802 
2803 /// Return an approximation of this SCEV expression's "base", or NULL for any
2804 /// constant. Returning the expression itself is conservative. Returning a
2805 /// deeper subexpression is more precise and valid as long as it isn't less
2806 /// complex than another subexpression. For expressions involving multiple
2807 /// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids
2808 /// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i],
2809 /// IVInc==b-a.
2810 ///
2811 /// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
2812 /// SCEVUnknown, we simply return the rightmost SCEV operand.
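     ///
     /// For example (an illustrative sketch): given S = {(16 + %a),+,8}<%L>,
     /// the addrec case recurses into the start (16 + %a), whose rightmost
     /// non-multiplied operand is the SCEVUnknown %a, so %a is returned as
     /// the base; a plain constant such as 42 yields nullptr.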
2813 static const SCEV *getExprBase(const SCEV *S) {
2814   switch (S->getSCEVType()) {
2815   default: // including scUnknown.
2816     return S;
2817   case scConstant:
2818     return nullptr;
2819   case scTruncate:
2820     return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
2821   case scZeroExtend:
2822     return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
2823   case scSignExtend:
2824     return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
2825   case scAddExpr: {
2826     // Skip over scaled operands (scMulExpr) to follow add operands as long as
2827     // there's nothing more complex.
2828     // FIXME: not sure if we want to recognize negation.
2829     const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
2830     for (const SCEV *SubExpr : reverse(Add->operands())) {
2831       if (SubExpr->getSCEVType() == scAddExpr)
2832         return getExprBase(SubExpr);
2833 
2834       if (SubExpr->getSCEVType() != scMulExpr)
2835         return SubExpr;
2836     }
2837     return S; // all operands are scaled, be conservative.
2838   }
2839   case scAddRecExpr:
2840     return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
2841   }
2842   llvm_unreachable("Unknown SCEV kind!");
2843 }
2844 
2845 /// Return true if the chain increment is profitable to expand into a loop
2846 /// invariant value, which may require its own register. A profitable chain
2847 /// increment will be an offset relative to the same base. We allow such offsets
2848 /// to potentially be used as a chain increment as long as they're not obviously
2849 /// expensive to expand using real instructions.
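     ///
     /// Illustrative example: if the chain head operand is %p and OperExpr is
     /// (%p + 8), this user sits at a constant offset from the head; chaining
     /// it behind a nonconstant increment such as %n would forfeit that
     /// foldable constant offset, so such an increment is rejected below.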
2850 bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
2851                                     const SCEV *IncExpr,
2852                                     ScalarEvolution &SE) {
2853   // Aggressively form chains when -stress-ivchain.
2854   if (StressIVChain)
2855     return true;
2856 
2857   // Do not replace a constant offset from IV head with a nonconstant IV
2858   // increment.
2859   if (!isa<SCEVConstant>(IncExpr)) {
2860     const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
2861     if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
2862       return false;
2863   }
2864 
2865   SmallPtrSet<const SCEV*, 8> Processed;
2866   return !isHighCostExpansion(IncExpr, Processed, SE);
2867 }
2868 
2869 /// Return true if the number of registers needed for the chain is estimated to
2870 /// be less than the number required for the individual IV users. First prohibit
2871 /// any IV users that keep the IV live across increments (the Users set should
2872 /// be empty). Next count the number and type of increments in the chain.
2873 ///
2874 /// Chaining IVs can lead to considerable code bloat if ISEL doesn't
2875 /// effectively use postinc addressing modes. Only consider it profitable it the
2876 /// increments can be computed in fewer registers when chained.
2877 ///
2878 /// TODO: Consider IVInc free if it's already used in other chains.
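     ///
     /// A sketch of the accounting below: cost starts at 1 (the chain's own
     /// register), drops by 1 if the tail is the header phi computing the full
     /// recurrence, drops by 1 more when there are multiple constant
     /// increments, gains 1 per new variable increment, and drops 1 per reused
     /// variable increment; the chain is kept only if the final cost is
     /// negative.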
2879 static bool isProfitableChain(IVChain &Chain,
2880                               SmallPtrSetImpl<Instruction *> &Users,
2881                               ScalarEvolution &SE,
2882                               const TargetTransformInfo &TTI) {
2883   if (StressIVChain)
2884     return true;
2885 
2886   if (!Chain.hasIncs())
2887     return false;
2888 
2889   if (!Users.empty()) {
2890     LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
2891                for (Instruction *Inst
2892                     : Users) { dbgs() << "  " << *Inst << "\n"; });
2893     return false;
2894   }
2895   assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
2896 
2897   // The chain itself may require a register, so initialize cost to 1.
2898   int cost = 1;
2899 
2900   // A complete chain likely eliminates the need for keeping the original IV in
2901   // a register. LSR does not currently know how to form a complete chain unless
2902   // the header phi already exists.
2903   if (isa<PHINode>(Chain.tailUserInst())
2904       && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
2905     --cost;
2906   }
2907   const SCEV *LastIncExpr = nullptr;
2908   unsigned NumConstIncrements = 0;
2909   unsigned NumVarIncrements = 0;
2910   unsigned NumReusedIncrements = 0;
2911 
2912   if (TTI.isProfitableLSRChainElement(Chain.Incs[0].UserInst))
2913     return true;
2914 
2915   for (const IVInc &Inc : Chain) {
2916     if (TTI.isProfitableLSRChainElement(Inc.UserInst))
2917       return true;
2918     if (Inc.IncExpr->isZero())
2919       continue;
2920 
2921     // Incrementing by zero or some constant is neutral. We assume constants can
2922     // be folded into an addressing mode or an add's immediate operand.
2923     if (isa<SCEVConstant>(Inc.IncExpr)) {
2924       ++NumConstIncrements;
2925       continue;
2926     }
2927 
2928     if (Inc.IncExpr == LastIncExpr)
2929       ++NumReusedIncrements;
2930     else
2931       ++NumVarIncrements;
2932 
2933     LastIncExpr = Inc.IncExpr;
2934   }
2935   // An IV chain with a single increment is already handled by LSR's postinc
2936   // uses, so only reward chains with multiple constant increments; without
2937   // chaining, the IV's value would be kept live across all of them.
2938   if (NumConstIncrements > 1)
2939     --cost;
2940 
2941   // Materializing increment expressions in the preheader that didn't exist in
2942   // the original code may cost a register. For example, sign-extended array
2943   // indices can produce ridiculous increments like this:
2944   // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
2945   cost += NumVarIncrements;
2946 
2947   // Reusing variable increments likely saves a register to hold the multiple of
2948   // the stride.
2949   cost -= NumReusedIncrements;
2950 
2951   LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
2952                     << "\n");
2953 
2954   return cost < 0;
2955 }
2956 
2957 /// Add this IV user to an existing chain or make it the head of a new chain.
2958 void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
2959                                    SmallVectorImpl<ChainUsers> &ChainUsersVec) {
2960   // When IVs are used at varying widths, they are generally converted
2961   // to a wider type with some uses remaining narrow under a (free) trunc.
2962   Value *const NextIV = getWideOperand(IVOper);
2963   const SCEV *const OperExpr = SE.getSCEV(NextIV);
2964   const SCEV *const OperExprBase = getExprBase(OperExpr);
2965 
2966   // Visit all existing chains. Check if this user's IVOper can be computed as a
2967   // profitable loop invariant increment from the last link in the Chain.
2968   unsigned ChainIdx = 0, NChains = IVChainVec.size();
2969   const SCEV *LastIncExpr = nullptr;
2970   for (; ChainIdx < NChains; ++ChainIdx) {
2971     IVChain &Chain = IVChainVec[ChainIdx];
2972 
2973     // Prune the solution space aggressively by checking that both IV operands
2974     // are expressions that operate on the same unscaled SCEVUnknown. This
2975     // "base" will be canceled by the subsequent getMinusSCEV call. Checking
2976     // first avoids creating extra SCEV expressions.
2977     if (!StressIVChain && Chain.ExprBase != OperExprBase)
2978       continue;
2979 
2980     Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand);
2981     if (!isCompatibleIVType(PrevIV, NextIV))
2982       continue;
2983 
2984     // A phi node terminates a chain.
2985     if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst()))
2986       continue;
2987 
2988     // The increment must be loop-invariant so it can be kept in a register.
2989     const SCEV *PrevExpr = SE.getSCEV(PrevIV);
2990     const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
2991     if (isa<SCEVCouldNotCompute>(IncExpr) || !SE.isLoopInvariant(IncExpr, L))
2992       continue;
2993 
2994     if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
2995       LastIncExpr = IncExpr;
2996       break;
2997     }
2998   }
2999   // If we haven't found a chain, create a new one, unless we hit the max. Don't
3000   // bother for phi nodes, because they must be last in the chain.
3001   if (ChainIdx == NChains) {
3002     if (isa<PHINode>(UserInst))
3003       return;
3004     if (NChains >= MaxChains && !StressIVChain) {
3005       LLVM_DEBUG(dbgs() << "IV Chain Limit\n");
3006       return;
3007     }
3008     LastIncExpr = OperExpr;
3009     // IVUsers may have skipped over sign/zero extensions. We don't currently
3010     // attempt to form chains involving extensions unless they can be hoisted
3011     // into this loop's AddRec.
3012     if (!isa<SCEVAddRecExpr>(LastIncExpr))
3013       return;
3014     ++NChains;
3015     IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
3016                                  OperExprBase));
3017     ChainUsersVec.resize(NChains);
3018     LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
3019                       << ") IV=" << *LastIncExpr << "\n");
3020   } else {
3021     LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << "  Inc: (" << *UserInst
3022                       << ") IV+" << *LastIncExpr << "\n");
3023     // Add this IV user to the end of the chain.
3024     IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
3025   }
3026   IVChain &Chain = IVChainVec[ChainIdx];
3027 
3028   SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
3029   // This chain's NearUsers become FarUsers.
3030   if (!LastIncExpr->isZero()) {
3031     ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
3032                                             NearUsers.end());
3033     NearUsers.clear();
3034   }
3035 
3036   // All other uses of IVOperand become near uses of the chain.
3037   // We currently ignore intermediate values within SCEV expressions, assuming
3038   // they will eventually be used by the current chain, or can be computed
3039   // from one of the chain increments. To be more precise we could
3040   // transitively follow their users and only add leaf IV users to the set.
3041   for (User *U : IVOper->users()) {
3042     Instruction *OtherUse = dyn_cast<Instruction>(U);
3043     if (!OtherUse)
3044       continue;
3045     // Uses in the chain will no longer be uses if the chain is formed.
3046     // Include the head of the chain in this iteration (not Chain.begin()).
3047     IVChain::const_iterator IncIter = Chain.Incs.begin();
3048     IVChain::const_iterator IncEnd = Chain.Incs.end();
3049     for (; IncIter != IncEnd; ++IncIter) {
3050       if (IncIter->UserInst == OtherUse)
3051         break;
3052     }
3053     if (IncIter != IncEnd)
3054       continue;
3055 
3056     if (SE.isSCEVable(OtherUse->getType())
3057         && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
3058         && IU.isIVUserOrOperand(OtherUse)) {
3059       continue;
3060     }
3061     NearUsers.insert(OtherUse);
3062   }
3063 
3064   // Since this user is part of the chain, it's no longer considered a use
3065   // of the chain.
3066   ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
3067 }
3068 
3069 /// Populate the vector of Chains.
3070 ///
3071 /// This decreases ILP at the architecture level. Targets with ample registers,
3072 /// multiple memory ports, and no register renaming probably don't want
3073 /// this. However, such targets should probably disable LSR altogether.
3074 ///
3075 /// The job of LSR is to make a reasonable choice of induction variables across
3076 /// the loop. Subsequent passes can easily "unchain" computation exposing more
3077 /// ILP *within the loop* if the target wants it.
3078 ///
3079 /// Finding the best IV chain is potentially a scheduling problem. Since LSR
3080 /// will not reorder memory operations, it will recognize this as a chain, but
3081 /// will generate redundant IV increments. Ideally this would be corrected later
3082 /// by a smart scheduler:
3083 ///        = A[i]
3084 ///        = A[i+x]
3085 /// A[i]   =
3086 /// A[i+x] =
3087 ///
3088 /// TODO: Walk the entire domtree within this loop, not just the path to the
3089 /// loop latch. This will discover chains on side paths, but requires
3090 /// maintaining multiple copies of the Chains state.
3091 void LSRInstance::CollectChains() {
3092   LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n");
3093   SmallVector<ChainUsers, 8> ChainUsersVec;
3094 
3095   SmallVector<BasicBlock *,8> LatchPath;
3096   BasicBlock *LoopHeader = L->getHeader();
3097   for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
3098        Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
3099     LatchPath.push_back(Rung->getBlock());
3100   }
3101   LatchPath.push_back(LoopHeader);
3102 
3103   // Walk the instruction stream from the loop header to the loop latch.
3104   for (BasicBlock *BB : reverse(LatchPath)) {
3105     for (Instruction &I : *BB) {
3106       // Skip instructions that weren't seen by IVUsers analysis.
3107       if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I))
3108         continue;
3109 
3110       // Ignore users that are part of a SCEV expression. This way we only
3111       // consider leaf IV Users. This effectively rediscovers a portion of
3112       // IVUsers analysis but in program order this time.
3113       if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I)))
3114         continue;
3115 
3116       // Remove this instruction from any NearUsers set it may be in.
3117       for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
3118            ChainIdx < NChains; ++ChainIdx) {
3119         ChainUsersVec[ChainIdx].NearUsers.erase(&I);
3120       }
3121       // Search for operands that can be chained.
3122       SmallPtrSet<Instruction*, 4> UniqueOperands;
3123       User::op_iterator IVOpEnd = I.op_end();
3124       User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE);
3125       while (IVOpIter != IVOpEnd) {
3126         Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
3127         if (UniqueOperands.insert(IVOpInst).second)
3128           ChainInstruction(&I, IVOpInst, ChainUsersVec);
3129         IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3130       }
3131     } // Continue walking down the instructions.
3132   } // Continue walking down the domtree.
3133   // Visit phi backedges to determine if the chain can generate the IV postinc.
3134   for (PHINode &PN : L->getHeader()->phis()) {
3135     if (!SE.isSCEVable(PN.getType()))
3136       continue;
3137 
3138     Instruction *IncV =
3139         dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch()));
3140     if (IncV)
3141       ChainInstruction(&PN, IncV, ChainUsersVec);
3142   }
3143   // Remove any unprofitable chains.
3144   unsigned ChainIdx = 0;
3145   for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
3146        UsersIdx < NChains; ++UsersIdx) {
3147     if (!isProfitableChain(IVChainVec[UsersIdx],
3148                            ChainUsersVec[UsersIdx].FarUsers, SE, TTI))
3149       continue;
3150     // Preserve the chain at UsersIdx.
3151     if (ChainIdx != UsersIdx)
3152       IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
3153     FinalizeChain(IVChainVec[ChainIdx]);
3154     ++ChainIdx;
3155   }
3156   IVChainVec.resize(ChainIdx);
3157 }
3158 
3159 void LSRInstance::FinalizeChain(IVChain &Chain) {
3160   assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
3161   LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
3162 
3163   for (const IVInc &Inc : Chain) {
3164     LLVM_DEBUG(dbgs() << "        Inc: " << *Inc.UserInst << "\n");
3165     auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
3166     assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
3167     IVIncSet.insert(UseI);
3168   }
3169 }
3170 
3171 /// Return true if the IVInc can be folded into an addressing mode.
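     ///
     /// Illustrative example: a constant increment of 8 on an address use
     /// folds when the target accepts a base + 8 addressing mode for the
     /// access type; a nonconstant or wider-than-64-bit increment never folds.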
3172 static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
3173                              Value *Operand, const TargetTransformInfo &TTI) {
3174   const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
3175   if (!IncConst || !isAddressUse(TTI, UserInst, Operand))
3176     return false;
3177 
3178   if (IncConst->getAPInt().getMinSignedBits() > 64)
3179     return false;
3180 
3181   MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand);
3182   int64_t IncOffset = IncConst->getValue()->getSExtValue();
3183   if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
3184                         IncOffset, /*HasBaseReg=*/false))
3185     return false;
3186 
3187   return true;
3188 }
3189 
3190 /// Generate an add or subtract for each IVInc in a chain to materialize the IV
3191 /// user's operand from the previous IV user's operand.
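     ///
     /// Illustrative sketch: for a chain of accesses at %p, %p+4, and %p+8
     /// whose increments fold into the addressing mode, only %p is kept in a
     /// register and each user's operand is rewritten relative to the previous
     /// one, instead of materializing an independent IV for every access.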
3192 void LSRInstance::GenerateIVChain(const IVChain &Chain,
3193                                   SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
3194   // Find the new IVOperand for the head of the chain. It may have been replaced
3195   // by LSR.
3196   const IVInc &Head = Chain.Incs[0];
3197   User::op_iterator IVOpEnd = Head.UserInst->op_end();
3198   // findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
3199   User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
3200                                              IVOpEnd, L, SE);
3201   Value *IVSrc = nullptr;
3202   while (IVOpIter != IVOpEnd) {
3203     IVSrc = getWideOperand(*IVOpIter);
3204 
3205     // If this operand computes the expression that the chain needs, we may use
3206     // it. (Check this after setting IVSrc which is used below.)
3207     //
3208     // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
3209     // narrow for the chain, so we can no longer use it. We do allow using a
3210     // wider phi, assuming the LSR checked for free truncation. In that case we
3211     // should already have a truncate on this operand such that
3212     // getSCEV(IVSrc) == IncExpr.
3213     if (SE.getSCEV(*IVOpIter) == Head.IncExpr
3214         || SE.getSCEV(IVSrc) == Head.IncExpr) {
3215       break;
3216     }
3217     IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3218   }
3219   if (IVOpIter == IVOpEnd) {
3220     // Gracefully give up on this chain.
3221     LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
3222     return;
3223   }
3224   assert(IVSrc && "Failed to find IV chain source");
3225 
3226   LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
3227   Type *IVTy = IVSrc->getType();
3228   Type *IntTy = SE.getEffectiveSCEVType(IVTy);
3229   const SCEV *LeftOverExpr = nullptr;
3230   for (const IVInc &Inc : Chain) {
3231     Instruction *InsertPt = Inc.UserInst;
3232     if (isa<PHINode>(InsertPt))
3233       InsertPt = L->getLoopLatch()->getTerminator();
3234 
3235     // IVOper will replace the current IV User's operand. IVSrc is the IV
3236     // value currently held in a register.
3237     Value *IVOper = IVSrc;
3238     if (!Inc.IncExpr->isZero()) {
3239       // IncExpr was the result of subtraction of two narrow values, so must
3240       // be signed.
3241       const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy);
3242       LeftOverExpr = LeftOverExpr ?
3243         SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
3244     }
3245     if (LeftOverExpr && !LeftOverExpr->isZero()) {
3246       // Expand the IV increment.
3247       Rewriter.clearPostInc();
3248       Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
3249       const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
3250                                              SE.getUnknown(IncV));
3251       IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);
3252 
3253       // If an IV increment can't be folded, use it as the next IV value.
3254       if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) {
3255         assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
3256         IVSrc = IVOper;
3257         LeftOverExpr = nullptr;
3258       }
3259     }
3260     Type *OperTy = Inc.IVOperand->getType();
3261     if (IVTy != OperTy) {
3262       assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
3263              "cannot extend a chained IV");
3264       IRBuilder<> Builder(InsertPt);
3265       IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
3266     }
3267     Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper);
3268     if (auto *OperandIsInstr = dyn_cast<Instruction>(Inc.IVOperand))
3269       DeadInsts.emplace_back(OperandIsInstr);
3270   }
3271   // If LSR created a new, wider phi, we may also replace its postinc. We only
3272   // do this if we also found a wide value for the head of the chain.
3273   if (isa<PHINode>(Chain.tailUserInst())) {
3274     for (PHINode &Phi : L->getHeader()->phis()) {
3275       if (!isCompatibleIVType(&Phi, IVSrc))
3276         continue;
3277       Instruction *PostIncV = dyn_cast<Instruction>(
3278           Phi.getIncomingValueForBlock(L->getLoopLatch()));
3279       if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
3280         continue;
3281       Value *IVOper = IVSrc;
3282       Type *PostIncTy = PostIncV->getType();
3283       if (IVTy != PostIncTy) {
3284         assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
3285         IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
3286         Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
3287         IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
3288       }
3289       Phi.replaceUsesOfWith(PostIncV, IVOper);
3290       DeadInsts.emplace_back(PostIncV);
3291     }
3292   }
3293 }
3294 
3295 void LSRInstance::CollectFixupsAndInitialFormulae() {
3296   BranchInst *ExitBranch = nullptr;
3297   bool SaveCmp = TTI.canSaveCmp(L, &ExitBranch, &SE, &LI, &DT, &AC, &TLI);
3298 
3299   // For calculating baseline cost
3300   SmallPtrSet<const SCEV *, 16> Regs;
3301   DenseSet<const SCEV *> VisitedRegs;
3302   DenseSet<size_t> VisitedLSRUse;
3303 
3304   for (const IVStrideUse &U : IU) {
3305     Instruction *UserInst = U.getUser();
3306     // Skip IV users that are part of profitable IV Chains.
3307     User::op_iterator UseI =
3308         find(UserInst->operands(), U.getOperandValToReplace());
3309     assert(UseI != UserInst->op_end() && "cannot find IV operand");
3310     if (IVIncSet.count(UseI)) {
3311       LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');
3312       continue;
3313     }
3314 
3315     LSRUse::KindType Kind = LSRUse::Basic;
3316     MemAccessTy AccessTy;
3317     if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) {
3318       Kind = LSRUse::Address;
3319       AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace());
3320     }
3321 
3322     const SCEV *S = IU.getExpr(U);
3323     PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops();
3324 
3325     // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
3326     // (N - i == 0), and this allows (N - i) to be the expression that we work
3327     // with rather than just N or i, so we can consider the register
3328     // requirements for both N and i at the same time. Limiting this code to
3329     // equality icmps is not a problem because all interesting loops use
3330     // equality icmps, thanks to IndVarSimplify.
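         //
         // For example (illustrative): with i = {0,+,1}<%L> and a loop-invariant
         // N, the use (icmp eq i, N) becomes the ICmpZero use (N - i == 0), i.e.
         // S becomes {N,+,-1}<%L>, a single register counting down to zero.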
3331     if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst)) {
3332       // If CI can be saved by some target, e.g. replaced inside a hardware
3333       // loop on PowerPC, there is no need to generate initial formulae for it.
3334       if (SaveCmp && CI == dyn_cast<ICmpInst>(ExitBranch->getCondition()))
3335         continue;
3336       if (CI->isEquality()) {
3337         // Swap the operands if needed to put the OperandValToReplace on the
3338         // left, for consistency.
3339         Value *NV = CI->getOperand(1);
3340         if (NV == U.getOperandValToReplace()) {
3341           CI->setOperand(1, CI->getOperand(0));
3342           CI->setOperand(0, NV);
3343           NV = CI->getOperand(1);
3344           Changed = true;
3345         }
3346 
3347         // x == y  -->  x - y == 0
3348         const SCEV *N = SE.getSCEV(NV);
3349         if (SE.isLoopInvariant(N, L) && Rewriter.isSafeToExpand(N) &&
3350             (!NV->getType()->isPointerTy() ||
3351              SE.getPointerBase(N) == SE.getPointerBase(S))) {
3352           // S is normalized, so normalize N before folding it into S
3353           // to keep the result normalized.
3354           N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
3355           Kind = LSRUse::ICmpZero;
3356           S = SE.getMinusSCEV(N, S);
3357         } else if (L->isLoopInvariant(NV) &&
3358                    (!isa<Instruction>(NV) ||
3359                     DT.dominates(cast<Instruction>(NV), L->getHeader())) &&
3360                    !NV->getType()->isPointerTy()) {
3361           // If we can't generally expand the expression (e.g. it contains
3362           // a divide), but it is already at a loop invariant point before the
3363           // loop, wrap it in an unknown (to prevent the expander from trying
3364           // to re-expand in a potentially unsafe way.)  The restriction to
3365           // integer types is required because the unknown hides the base, and
3366           // SCEV can't compute the difference of two unknown pointers.
3367           N = SE.getUnknown(NV);
3368           N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
3369           Kind = LSRUse::ICmpZero;
3370           S = SE.getMinusSCEV(N, S);
3371           assert(!isa<SCEVCouldNotCompute>(S));
3372         }
3373 
3374         // -1 and the negations of all interesting strides (except the negation
3375         // of -1) are now also interesting.
3376         for (size_t i = 0, e = Factors.size(); i != e; ++i)
3377           if (Factors[i] != -1)
3378             Factors.insert(-(uint64_t)Factors[i]);
3379         Factors.insert(-1);
3380       }
3381     }
3382 
3383     // Get or create an LSRUse.
3384     std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
3385     size_t LUIdx = P.first;
3386     int64_t Offset = P.second;
3387     LSRUse &LU = Uses[LUIdx];
3388 
3389     // Record the fixup.
3390     LSRFixup &LF = LU.getNewFixup();
3391     LF.UserInst = UserInst;
3392     LF.OperandValToReplace = U.getOperandValToReplace();
3393     LF.PostIncLoops = TmpPostIncLoops;
3394     LF.Offset = Offset;
3395     LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3396 
3397     // Build a Formula from this use's SCEV to compute the baseline cost.
3398     if (!VisitedLSRUse.count(LUIdx) && !LF.isUseFullyOutsideLoop(L)) {
3399       Formula F;
3400       F.initialMatch(S, L, SE);
3401       BaselineCost.RateFormula(F, Regs, VisitedRegs, LU);
3402       VisitedLSRUse.insert(LUIdx);
3403     }
3404 
3405     if (!LU.WidestFixupType ||
3406         SE.getTypeSizeInBits(LU.WidestFixupType) <
3407         SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3408       LU.WidestFixupType = LF.OperandValToReplace->getType();
3409 
3410     // If this is the first use of this LSRUse, give it a formula.
3411     if (LU.Formulae.empty()) {
3412       InsertInitialFormula(S, LU, LUIdx);
3413       CountRegisters(LU.Formulae.back(), LUIdx);
3414     }
3415   }
3416 
3417   LLVM_DEBUG(print_fixups(dbgs()));
3418 }
3419 
3420 /// Insert a formula for the given expression into the given use, separating out
3421 /// loop-variant portions from loop-invariant and loop-computable portions.
3422 void LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU,
3423                                        size_t LUIdx) {
3424   // Mark uses whose expressions cannot be expanded.
3425   if (!Rewriter.isSafeToExpand(S))
3426     LU.RigidFormula = true;
3427 
3428   Formula F;
3429   F.initialMatch(S, L, SE);
3430   bool Inserted = InsertFormula(LU, LUIdx, F);
3431   assert(Inserted && "Initial formula already exists!"); (void)Inserted;
3432 }
3433 
3434 /// Insert a simple single-register formula for the given expression into the
3435 /// given use.
3436 void
3437 LSRInstance::InsertSupplementalFormula(const SCEV *S,
3438                                        LSRUse &LU, size_t LUIdx) {
3439   Formula F;
3440   F.BaseRegs.push_back(S);
3441   F.HasBaseReg = true;
3442   bool Inserted = InsertFormula(LU, LUIdx, F);
3443   assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
3444 }
3445 
3446 /// Note which registers are used by the given formula, updating RegUses.
3447 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
3448   if (F.ScaledReg)
3449     RegUses.countRegister(F.ScaledReg, LUIdx);
3450   for (const SCEV *BaseReg : F.BaseRegs)
3451     RegUses.countRegister(BaseReg, LUIdx);
3452 }
3453 
3454 /// If the given formula has not yet been inserted, add it to the list, and
3455 /// return true. Return false otherwise.
3456 bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
3457   // Do not insert formula that we will not be able to expand.
3458   assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) &&
3459          "Formula is illegal");
3460 
3461   if (!LU.InsertFormula(F, *L))
3462     return false;
3463 
3464   CountRegisters(F, LUIdx);
3465   return true;
3466 }
3467 
3468 /// Check for other uses of loop-invariant values which we're tracking. These
3469 /// other uses will pin these values in registers, making them less profitable
3470 /// for elimination.
3471 /// TODO: This currently misses non-constant addrec step registers.
3472 /// TODO: Should this give more weight to users inside the loop?
3473 void
3474 LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
3475   SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
3476   SmallPtrSet<const SCEV *, 32> Visited;
3477 
3478   while (!Worklist.empty()) {
3479     const SCEV *S = Worklist.pop_back_val();
3480 
3481     // Don't process the same SCEV twice
3482     if (!Visited.insert(S).second)
3483       continue;
3484 
3485     if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
3486       append_range(Worklist, N->operands());
3487     else if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(S))
3488       Worklist.push_back(C->getOperand());
3489     else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
3490       Worklist.push_back(D->getLHS());
3491       Worklist.push_back(D->getRHS());
3492     } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
3493       const Value *V = US->getValue();
3494       if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
3495         // Look for instructions defined outside the loop.
3496         if (L->contains(Inst)) continue;
3497       } else if (isa<UndefValue>(V))
3498         // Undef doesn't have a live range, so it doesn't matter.
3499         continue;
3500       for (const Use &U : V->uses()) {
3501         const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
3502         // Ignore non-instructions.
3503         if (!UserInst)
3504           continue;
3505         // Don't bother if the instruction is an EHPad.
3506         if (UserInst->isEHPad())
3507           continue;
3508         // Ignore instructions in other functions (as can happen with
3509         // Constants).
3510         if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
3511           continue;
3512         // Ignore instructions not dominated by the loop.
3513         const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
3514           UserInst->getParent() :
3515           cast<PHINode>(UserInst)->getIncomingBlock(
3516             PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3517         if (!DT.dominates(L->getHeader(), UseBB))
3518           continue;
3519         // Don't bother if the instruction is in a BB which ends in an EHPad.
3520         if (UseBB->getTerminator()->isEHPad())
3521           continue;
3522 
3523         // Ignore cases in which the currently-examined value could come from
3524         // a basic block terminated with an EHPad. This checks all incoming
3525         // blocks of the phi node since it is possible that the same incoming
3526         // value comes from multiple basic blocks, only some of which may end
3527         // in an EHPad. If any of them do, a subsequent rewrite attempt by this
3528         // pass would try to insert instructions into an EHPad, hitting an
3529         // assertion.
3530         if (isa<PHINode>(UserInst)) {
3531           const auto *PhiNode = cast<PHINode>(UserInst);
3532           bool HasIncompatibleEHPTerminatedBlock = false;
3533           llvm::Value *ExpectedValue = U;
3534           for (unsigned int I = 0; I < PhiNode->getNumIncomingValues(); I++) {
3535             if (PhiNode->getIncomingValue(I) == ExpectedValue) {
3536               if (PhiNode->getIncomingBlock(I)->getTerminator()->isEHPad()) {
3537                 HasIncompatibleEHPTerminatedBlock = true;
3538                 break;
3539               }
3540             }
3541           }
3542           if (HasIncompatibleEHPTerminatedBlock) {
3543             continue;
3544           }
3545         }
3546 
3547         // Don't bother rewriting PHIs in catchswitch blocks.
3548         if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator()))
3549           continue;
3550         // Ignore uses which are part of other SCEV expressions, to avoid
3551         // analyzing them multiple times.
3552         if (SE.isSCEVable(UserInst->getType())) {
3553           const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
3554           // If the user is a no-op, look through to its uses.
3555           if (!isa<SCEVUnknown>(UserS))
3556             continue;
3557           if (UserS == US) {
3558             Worklist.push_back(
3559               SE.getUnknown(const_cast<Instruction *>(UserInst)));
3560             continue;
3561           }
3562         }
3563         // Ignore icmp instructions which are already being analyzed.
3564         if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
3565           unsigned OtherIdx = !U.getOperandNo();
3566           Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
3567           if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
3568             continue;
3569         }
3570 
3571         std::pair<size_t, int64_t> P = getUse(
3572             S, LSRUse::Basic, MemAccessTy());
3573         size_t LUIdx = P.first;
3574         int64_t Offset = P.second;
3575         LSRUse &LU = Uses[LUIdx];
3576         LSRFixup &LF = LU.getNewFixup();
3577         LF.UserInst = const_cast<Instruction *>(UserInst);
3578         LF.OperandValToReplace = U;
3579         LF.Offset = Offset;
3580         LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3581         if (!LU.WidestFixupType ||
3582             SE.getTypeSizeInBits(LU.WidestFixupType) <
3583             SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3584           LU.WidestFixupType = LF.OperandValToReplace->getType();
3585         InsertSupplementalFormula(US, LU, LUIdx);
3586         CountRegisters(LU.Formulae.back(), Uses.size() - 1);
3587         break;
3588       }
3589     }
3590   }
3591 }
3592 
3593 /// Split S into subexpressions which can be pulled out into separate
3594 /// registers. If C is non-null, multiply each subexpression by C.
3595 ///
3596 /// Return the remainder expression after factoring the subexpressions captured
3597 /// Ops. If Ops is complete, return NULL.
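     ///
     /// For example (an illustrative sketch): S = {(%a + %b),+,%s}<%L> pushes
     /// the subexpressions %a and %b onto Ops and returns the remainder
     /// {0,+,%s}<%L>; with C == 2, each captured subexpression is pushed as
     /// (2 * Sub) instead.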
3598 static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
3599                                    SmallVectorImpl<const SCEV *> &Ops,
3600                                    const Loop *L,
3601                                    ScalarEvolution &SE,
3602                                    unsigned Depth = 0) {
3603   // Arbitrarily cap recursion to protect compile time.
3604   if (Depth >= 3)
3605     return S;
3606 
3607   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3608     // Break out add operands.
3609     for (const SCEV *S : Add->operands()) {
3610       const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1);
3611       if (Remainder)
3612         Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3613     }
3614     return nullptr;
3615   } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
3616     // Split a non-zero base out of an addrec.
3617     if (AR->getStart()->isZero() || !AR->isAffine())
3618       return S;
3619 
3620     const SCEV *Remainder = CollectSubexprs(AR->getStart(),
3621                                             C, Ops, L, SE, Depth+1);
3622     // Split the non-zero AddRec unless it is part of a nested recurrence that
3623     // does not pertain to this loop.
3624     if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
3625       Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3626       Remainder = nullptr;
3627     }
3628     if (Remainder != AR->getStart()) {
3629       if (!Remainder)
3630         Remainder = SE.getConstant(AR->getType(), 0);
3631       return SE.getAddRecExpr(Remainder,
3632                               AR->getStepRecurrence(SE),
3633                               AR->getLoop(),
3634                               //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
3635                               SCEV::FlagAnyWrap);
3636     }
3637   } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3638     // Break (C * (a + b + c)) into C*a + C*b + C*c.
3639     if (Mul->getNumOperands() != 2)
3640       return S;
3641     if (const SCEVConstant *Op0 =
3642         dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3643       C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0;
3644       const SCEV *Remainder =
3645         CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
3646       if (Remainder)
3647         Ops.push_back(SE.getMulExpr(C, Remainder));
3648       return nullptr;
3649     }
3650   }
3651   return S;
3652 }
3653 
3654 /// Return true if the SCEV represents a value that may end up as a
3655 /// post-increment operation.
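     ///
     /// For example (illustrative): an address {%base,+,4}<%L> with a
     /// loop-invariant, nonconstant start is a candidate when the target
     /// supports post-indexed loads or stores of the access type.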
3656 static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
3657                               LSRUse &LU, const SCEV *S, const Loop *L,
3658                               ScalarEvolution &SE) {
3659   if (LU.Kind != LSRUse::Address ||
3660       !LU.AccessTy.getType()->isIntOrIntVectorTy())
3661     return false;
3662   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
3663   if (!AR)
3664     return false;
3665   const SCEV *LoopStep = AR->getStepRecurrence(SE);
3666   if (!isa<SCEVConstant>(LoopStep))
3667     return false;
3668   // Check if a post-indexed load/store can be used.
3669   if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
3670       TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
3671     const SCEV *LoopStart = AR->getStart();
3672     if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L))
3673       return true;
3674   }
3675   return false;
3676 }
3677 
3678 /// Helper function for LSRInstance::GenerateReassociations.
3679 void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
3680                                              const Formula &Base,
3681                                              unsigned Depth, size_t Idx,
3682                                              bool IsScaledReg) {
3683   const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3684   // Don't generate reassociations for the base register of a value that
3685   // may generate a post-increment operator. The reason is that the
3686   // reassociations cause extra base+register formulae to be created,
3687   // and possibly chosen, but the post-increment is more efficient.
3688   if (AMK == TTI::AMK_PostIndexed && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
3689     return;
3690   SmallVector<const SCEV *, 8> AddOps;
3691   const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
3692   if (Remainder)
3693     AddOps.push_back(Remainder);
3694 
3695   if (AddOps.size() == 1)
3696     return;
3697 
3698   for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
3699                                                      JE = AddOps.end();
3700        J != JE; ++J) {
3701     // Loop-variant "unknown" values are uninteresting; we won't be able to
3702     // do anything meaningful with them.
3703     if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
3704       continue;
3705 
3706     // Don't pull a constant into a register if the constant could be folded
3707     // into an immediate field.
3708     if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3709                          LU.AccessTy, *J, Base.getNumRegs() > 1))
3710       continue;
3711 
3712     // Collect all operands except *J.
3713     SmallVector<const SCEV *, 8> InnerAddOps(
3714         ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
3715     InnerAddOps.append(std::next(J),
3716                        ((const SmallVector<const SCEV *, 8> &)AddOps).end());
3717 
3718     // Don't leave just a constant behind in a register if the constant could
3719     // be folded into an immediate field.
3720     if (InnerAddOps.size() == 1 &&
3721         isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3722                          LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
3723       continue;
3724 
3725     const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
3726     if (InnerSum->isZero())
3727       continue;
3728     Formula F = Base;
3729 
3730     // Add the remaining pieces of the add back into the new formula.
3731     const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
3732     if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
3733         TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3734                                 InnerSumSC->getValue()->getZExtValue())) {
3735       F.UnfoldedOffset =
3736           (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
3737       if (IsScaledReg)
3738         F.ScaledReg = nullptr;
3739       else
3740         F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
3741     } else if (IsScaledReg)
3742       F.ScaledReg = InnerSum;
3743     else
3744       F.BaseRegs[Idx] = InnerSum;
3745 
3746     // Add J as its own register, or an unfolded immediate.
3747     const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
3748     if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
3749         TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3750                                 SC->getValue()->getZExtValue()))
3751       F.UnfoldedOffset =
3752           (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
3753     else
3754       F.BaseRegs.push_back(*J);
3755     // We may have changed the number of registers in base regs; adjust the
3756     // formula accordingly.
3757     F.canonicalize(*L);
3758 
3759     if (InsertFormula(LU, LUIdx, F))
3760       // If that formula hadn't been seen before, recurse to find more like
3761       // it.
3762       // Grow Depth by Log16(AddOps.size()), i.e. Log2_32(AddOps.size()) >> 2,
3763       // because Depth alone is not enough to bound compile time: every time
3764       // AddOps.size() exceeds 16^x, x is added to Depth. For example, 256
3765       // (16^2) operands add 2 to the recursion depth.
3766       GenerateReassociations(LU, LUIdx, LU.Formulae.back(),
3767                              Depth + 1 + (Log2_32(AddOps.size()) >> 2));
3768   }
3769 }
3770 
3771 /// Split out subexpressions from adds and the bases of addrecs.
3772 void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
3773                                          Formula Base, unsigned Depth) {
3774   assert(Base.isCanonical(*L) && "Input must be in the canonical form");
3775   // Arbitrarily cap recursion to protect compile time.
3776   if (Depth >= 3)
3777     return;
3778 
3779   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3780     GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);
3781 
3782   if (Base.Scale == 1)
3783     GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
3784                                /* Idx */ -1, /* IsScaledReg */ true);
3785 }
3786 
3787 /// Generate a formula consisting of all of the loop-dominating registers added
3788 /// into a single register.
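     ///
     /// For example (an illustrative sketch): reg(%inv1) + reg(%inv2) +
     /// reg({0,+,1}) where %inv1 and %inv2 dominate the loop header and have
     /// no evolution in this loop can be rewritten as reg(%inv1 + %inv2) +
     /// reg({0,+,1}), trading a preheader add for one less register in the
     /// loop body.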
3789 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
3790                                        Formula Base) {
3791   // This method is only interesting on a plurality of registers.
3792   if (Base.BaseRegs.size() + (Base.Scale == 1) +
3793       (Base.UnfoldedOffset != 0) <= 1)
3794     return;
3795 
3796   // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
3797   // processing the formula.
3798   Base.unscale();
3799   SmallVector<const SCEV *, 4> Ops;
3800   Formula NewBase = Base;
3801   NewBase.BaseRegs.clear();
3802   Type *CombinedIntegerType = nullptr;
3803   for (const SCEV *BaseReg : Base.BaseRegs) {
3804     if (SE.properlyDominates(BaseReg, L->getHeader()) &&
3805         !SE.hasComputableLoopEvolution(BaseReg, L)) {
3806       if (!CombinedIntegerType)
3807         CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType());
3808       Ops.push_back(BaseReg);
3809     }
3810     else
3811       NewBase.BaseRegs.push_back(BaseReg);
3812   }
3813 
3814   // If no register is relevant, we're done.
3815   if (Ops.size() == 0)
3816     return;
3817 
3818   // Utility function for generating the required variants of the combined
3819   // registers.
3820   auto GenerateFormula = [&](const SCEV *Sum) {
3821     Formula F = NewBase;
3822 
3823     // TODO: If Sum is zero, it probably means ScalarEvolution missed an
3824     // opportunity to fold something. For now, just ignore such cases
3825     // rather than proceed with zero in a register.
3826     if (Sum->isZero())
3827       return;
3828 
3829     F.BaseRegs.push_back(Sum);
3830     F.canonicalize(*L);
3831     (void)InsertFormula(LU, LUIdx, F);
3832   };
3833 
3834   // If we collected at least two registers, generate a formula combining them.
3835   if (Ops.size() > 1) {
3836     SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops.
3837     GenerateFormula(SE.getAddExpr(OpsCopy));
3838   }
3839 
3840   // If we have an unfolded offset, generate a formula combining it with the
3841   // registers collected.
3842   if (NewBase.UnfoldedOffset) {
3843     assert(CombinedIntegerType && "Missing a type for the unfolded offset");
3844     Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset,
3845                                  true));
3846     NewBase.UnfoldedOffset = 0;
3847     GenerateFormula(SE.getAddExpr(Ops));
3848   }
3849 }
3850 
3851 /// Helper function for LSRInstance::GenerateSymbolicOffsets.
3852 void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
3853                                               const Formula &Base, size_t Idx,
3854                                               bool IsScaledReg) {
3855   const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3856   GlobalValue *GV = ExtractSymbol(G, SE);
3857   if (G->isZero() || !GV)
3858     return;
3859   Formula F = Base;
3860   F.BaseGV = GV;
3861   if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3862     return;
3863   if (IsScaledReg)
3864     F.ScaledReg = G;
3865   else
3866     F.BaseRegs[Idx] = G;
3867   (void)InsertFormula(LU, LUIdx, F);
3868 }
3869 
3870 /// Generate reuse formulae using symbolic offsets.
3871 void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
3872                                           Formula Base) {
3873   // We can't add a symbolic offset if the address already contains one.
3874   if (Base.BaseGV) return;
3875 
3876   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3877     GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
3878   if (Base.Scale == 1)
3879     GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1,
3880                                 /* IsScaledReg */ true);
3881 }
3882 
3883 /// Helper function for LSRInstance::GenerateConstantOffsets.
3884 void LSRInstance::GenerateConstantOffsetsImpl(
3885     LSRUse &LU, unsigned LUIdx, const Formula &Base,
3886     const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
3887 
3888   auto GenerateOffset = [&](const SCEV *G, int64_t Offset) {
3889     Formula F = Base;
3890     F.BaseOffset = (uint64_t)Base.BaseOffset - Offset;
3891 
3892     if (isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) {
3893       // Add the offset to the base register.
3894       const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G);
3895       // If it cancelled out, drop the base register, otherwise update it.
3896       if (NewG->isZero()) {
3897         if (IsScaledReg) {
3898           F.Scale = 0;
3899           F.ScaledReg = nullptr;
3900         } else
3901           F.deleteBaseReg(F.BaseRegs[Idx]);
3902         F.canonicalize(*L);
3903       } else if (IsScaledReg)
3904         F.ScaledReg = NewG;
3905       else
3906         F.BaseRegs[Idx] = NewG;
3907 
3908       (void)InsertFormula(LU, LUIdx, F);
3909     }
3910   };
3911 
3912   const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3913 
3914   // With constant offsets and constant steps, we can generate pre-inc
3915   // accesses by having the offset equal the step. So, for access #0 with a
3916   // step of 8, we generate a G - 8 base which would require the first access
3917   // to be ((G - 8) + 8),+,8. The pre-indexed access then updates the pointer
3918   // for itself and hopefully becomes the base for other accesses. This means
3919   // that a single pre-indexed access can be generated to become the new
3920   // base pointer for each iteration of the loop, resulting in no extra add/sub
3921   // instructions for pointer updating.
3922   if (AMK == TTI::AMK_PreIndexed && LU.Kind == LSRUse::Address) {
3923     if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
3924       if (auto *StepRec =
3925           dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
3926         const APInt &StepInt = StepRec->getAPInt();
3927         int64_t Step = StepInt.isNegative() ?
3928           StepInt.getSExtValue() : StepInt.getZExtValue();
3929 
3930         for (int64_t Offset : Worklist) {
3931           Offset -= Step;
3932           GenerateOffset(G, Offset);
3933         }
3934       }
3935     }
3936   }
3937   for (int64_t Offset : Worklist)
3938     GenerateOffset(G, Offset);
3939 
3940   int64_t Imm = ExtractImmediate(G, SE);
3941   if (G->isZero() || Imm == 0)
3942     return;
3943   Formula F = Base;
3944   F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
3945   if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3946     return;
3947   if (IsScaledReg) {
3948     F.ScaledReg = G;
3949   } else {
3950     F.BaseRegs[Idx] = G;
3951     // We may generate a non-canonical Formula if G is a recurrence (addrec)
3952     // for the current loop while F.ScaledReg is not.
3953     F.canonicalize(*L);
3954   }
3955   (void)InsertFormula(LU, LUIdx, F);
3956 }
3957 
3958 /// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
3959 void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3960                                           Formula Base) {
3961   // TODO: For now, just add the min and max offset, because it usually isn't
3962   // worthwhile looking at everything in between.
3963   SmallVector<int64_t, 2> Worklist;
3964   Worklist.push_back(LU.MinOffset);
3965   if (LU.MaxOffset != LU.MinOffset)
3966     Worklist.push_back(LU.MaxOffset);
3967 
3968   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3969     GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
3970   if (Base.Scale == 1)
3971     GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
3972                                 /* IsScaledReg */ true);
3973 }
3974 
3975 /// For ICmpZero, check to see if we can scale up the comparison. For example, x
3976 /// == y -> x*c == y*c.
3977 void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3978                                          Formula Base) {
3979   if (LU.Kind != LSRUse::ICmpZero) return;
3980 
3981   // Determine the integer type for the base formula.
3982   Type *IntTy = Base.getType();
3983   if (!IntTy) return;
3984   if (SE.getTypeSizeInBits(IntTy) > 64) return;
3985 
3986   // Don't do this if there is more than one offset.
3987   if (LU.MinOffset != LU.MaxOffset) return;
3988 
3989   // Check if the transformation is valid. It is illegal to multiply a pointer.
3990   if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
3991     return;
3992   for (const SCEV *BaseReg : Base.BaseRegs)
3993     if (BaseReg->getType()->isPointerTy())
3994       return;
3995   assert(!Base.BaseGV && "ICmpZero use is not legal!");
3996 
3997   // Check each interesting stride.
3998   for (int64_t Factor : Factors) {
3999     // Check that Factor can be represented by IntTy
4000     if (!ConstantInt::isValueValidForType(IntTy, Factor))
4001       continue;
4002     // Check that the multiplication doesn't overflow.
4003     if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1)
4004       continue;
4005     int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
4006     assert(Factor != 0 && "Zero factor not expected!");
4007     if (NewBaseOffset / Factor != Base.BaseOffset)
4008       continue;
4009     // If the offset will be truncated at this use, check that it is in bounds.
4010     if (!IntTy->isPointerTy() &&
4011         !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
4012       continue;
4013 
4014     // Check that multiplying with the use offset doesn't overflow.
4015     int64_t Offset = LU.MinOffset;
4016     if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1)
4017       continue;
4018     Offset = (uint64_t)Offset * Factor;
4019     if (Offset / Factor != LU.MinOffset)
4020       continue;
4021     // If the offset will be truncated at this use, check that it is in bounds.
4022     if (!IntTy->isPointerTy() &&
4023         !ConstantInt::isValueValidForType(IntTy, Offset))
4024       continue;
4025 
4026     Formula F = Base;
4027     F.BaseOffset = NewBaseOffset;
4028 
4029     // Check that this scale is legal.
4030     if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
4031       continue;
4032 
4033     // Compensate for the use having MinOffset built into it.
4034     F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;
4035 
4036     const SCEV *FactorS = SE.getConstant(IntTy, Factor);
4037 
4038     // Check that multiplying with each base register doesn't overflow.
4039     for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
4040       F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
4041       if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
4042         goto next;
4043     }
4044 
4045     // Check that multiplying with the scaled register doesn't overflow.
4046     if (F.ScaledReg) {
4047       F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
4048       if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
4049         continue;
4050     }
4051 
4052     // Check that multiplying with the unfolded offset doesn't overflow.
4053     if (F.UnfoldedOffset != 0) {
4054       if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() &&
4055           Factor == -1)
4056         continue;
4057       F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
4058       if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
4059         continue;
4060       // If the offset will be truncated, check that it is in bounds.
4061       if (!IntTy->isPointerTy() &&
4062           !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
4063         continue;
4064     }
4065 
4066     // If we make it here and it's legal, add it.
4067     (void)InsertFormula(LU, LUIdx, F);
4068   next:;
4069   }
4070 }
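     // A minimal standalone sketch (not part of the pass) of the overflow check
     // pattern used above: perform the multiply in uint64_t, where wrap-around
     // is well defined, then verify by division that the signed product did not
     // overflow. The helper name is hypothetical, for illustration only.
     //
     //   static bool mulOverflows(int64_t A, int64_t B, int64_t &Out) {
     //     assert(B != 0 && "Zero factor not expected!");
     //     if (A == std::numeric_limits<int64_t>::min() && B == -1)
     //       return true; // INT64_MIN * -1 does not fit in int64_t.
     //     Out = (uint64_t)A * B; // wraps instead of invoking signed-overflow UB
     //     return Out / B != A;   // division round-trips iff there was no overflow
     //   }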
4071 
4072 /// Generate stride factor reuse formulae, for example by making use of
4073 /// scaled-offset addressing modes.
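     /// As a concrete illustration: a base register {0,+,4}<%L> with Factor 4 can
     /// be re-expressed as ScaledReg {0,+,1}<%L> with Scale 4, which matches a
     /// reg*4 scaled-index addressing mode on targets that support one.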
4074 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
4075   // Determine the integer type for the base formula.
4076   Type *IntTy = Base.getType();
4077   if (!IntTy) return;
4078 
4079   // If this Formula already has a scaled register, we can't add another one.
4080   // Try to unscale the formula to generate a better scale.
4081   if (Base.Scale != 0 && !Base.unscale())
4082     return;
4083 
4084   assert(Base.Scale == 0 && "unscale did not do its job!");
4085 
4086   // Check each interesting stride.
4087   for (int64_t Factor : Factors) {
4088     Base.Scale = Factor;
4089     Base.HasBaseReg = Base.BaseRegs.size() > 1;
4090     // Check whether this scale is going to be legal.
4091     if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
4092                     Base)) {
4093       // As a special case, handle out-of-loop Basic users specially.
4094       // TODO: Reconsider this special case.
4095       if (LU.Kind == LSRUse::Basic &&
4096           isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
4097                      LU.AccessTy, Base) &&
4098           LU.AllFixupsOutsideLoop)
4099         LU.Kind = LSRUse::Special;
4100       else
4101         continue;
4102     }
4103     // For an ICmpZero, negating a solitary base register won't lead to
4104     // new solutions.
4105     if (LU.Kind == LSRUse::ICmpZero &&
4106         !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV)
4107       continue;
4108     // For each addrec base reg, if its loop is current loop, apply the scale.
4109     for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
4110       const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]);
4111       if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) {
4112         const SCEV *FactorS = SE.getConstant(IntTy, Factor);
4113         if (FactorS->isZero())
4114           continue;
4115         // Divide out the factor, ignoring high bits, since we'll be
4116         // scaling the value back up in the end.
4117         if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true))
4118           if (!Quotient->isZero()) {
4119             // TODO: This could be optimized to avoid all the copying.
4120             Formula F = Base;
4121             F.ScaledReg = Quotient;
4122             F.deleteBaseReg(F.BaseRegs[i]);
4123             // The canonical representation of 1*reg is reg, which is already in
4124             // Base. In that case, do not try to insert the formula, it will be
4125             // rejected anyway.
4126             if (F.Scale == 1 && (F.BaseRegs.empty() ||
4127                                  (AR->getLoop() != L && LU.AllFixupsOutsideLoop)))
4128               continue;
4129             // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate
4130             // a non-canonical Formula whose ScaledReg's loop is not L.
4131             if (F.Scale == 1 && LU.AllFixupsOutsideLoop)
4132               F.canonicalize(*L);
4133             (void)InsertFormula(LU, LUIdx, F);
4134           }
4135       }
4136     }
4137   }
4138 }
4139 
4140 /// Generate reuse formulae from different IV types.
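     /// For example, if the target can truncate i64 to i32 for free and an i64 IV
     /// is already live, an i32 formula can be re-expressed over the wider i64
     /// registers, with the (free) truncation applied at the use.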
4141 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
4142   // Don't bother truncating symbolic values.
4143   if (Base.BaseGV) return;
4144 
4145   // Determine the integer type for the base formula.
4146   Type *DstTy = Base.getType();
4147   if (!DstTy) return;
4148   if (DstTy->isPointerTy())
4149     return;
4150 
4151   // It is invalid to extend a pointer type so exit early if ScaledReg or
4152   // any of the BaseRegs are pointers.
4153   if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
4154     return;
4155   if (any_of(Base.BaseRegs,
4156              [](const SCEV *S) { return S->getType()->isPointerTy(); }))
4157     return;
4158 
4159   for (Type *SrcTy : Types) {
4160     if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
4161       Formula F = Base;
4162 
4163       // Sometimes SCEV is able to prove zero during ext transform. It may
4164       // happen if SCEV did not do all possible transforms while creating the
4165       // initial node (maybe due to depth limitations), but it can do them while
4166       // taking ext.
4167       if (F.ScaledReg) {
4168         const SCEV *NewScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
4169         if (NewScaledReg->isZero())
4170          continue;
4171         F.ScaledReg = NewScaledReg;
4172       }
4173       bool HasZeroBaseReg = false;
4174       for (const SCEV *&BaseReg : F.BaseRegs) {
4175         const SCEV *NewBaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);
4176         if (NewBaseReg->isZero()) {
4177           HasZeroBaseReg = true;
4178           break;
4179         }
4180         BaseReg = NewBaseReg;
4181       }
4182       if (HasZeroBaseReg)
4183         continue;
4184 
4185       // TODO: This assumes we've done basic processing on all uses and
4186       // have an idea what the register usage is.
4187       if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
4188         continue;
4189 
4190       F.canonicalize(*L);
4191       (void)InsertFormula(LU, LUIdx, F);
4192     }
4193   }
4194 }
4195 
4196 namespace {
4197 
4198 /// Helper class for GenerateCrossUseConstantOffsets. It's used to defer
4199 /// modifications so that the search phase doesn't have to worry about the data
4200 /// structures moving underneath it.
4201 struct WorkItem {
4202   size_t LUIdx;
4203   int64_t Imm;
4204   const SCEV *OrigReg;
4205 
4206   WorkItem(size_t LI, int64_t I, const SCEV *R)
4207       : LUIdx(LI), Imm(I), OrigReg(R) {}
4208 
4209   void print(raw_ostream &OS) const;
4210   void dump() const;
4211 };
4212 
4213 } // end anonymous namespace
4214 
4215 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4216 void WorkItem::print(raw_ostream &OS) const {
4217   OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
4218      << ", add offset " << Imm;
4219 }
4220 
4221 LLVM_DUMP_METHOD void WorkItem::dump() const {
4222   print(errs()); errs() << '\n';
4223 }
4224 #endif
4225 
4226 /// Look for registers which are a constant distance apart and try to form reuse
4227 /// opportunities between them.
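     /// For example, if one use needs the register (A + 4) and another needs the
     /// register A, the first can often be rewritten to use register A with a
     /// constant offset of 4 folded into its formula, so both share one register.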
4228 void LSRInstance::GenerateCrossUseConstantOffsets() {
4229   // Group the registers by their value without any added constant offset.
4230   using ImmMapTy = std::map<int64_t, const SCEV *>;
4231 
4232   DenseMap<const SCEV *, ImmMapTy> Map;
4233   DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
4234   SmallVector<const SCEV *, 8> Sequence;
4235   for (const SCEV *Use : RegUses) {
4236     const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
4237     int64_t Imm = ExtractImmediate(Reg, SE);
4238     auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
4239     if (Pair.second)
4240       Sequence.push_back(Reg);
4241     Pair.first->second.insert(std::make_pair(Imm, Use));
4242     UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
4243   }
4244 
4245   // Now examine each set of registers with the same base value. Build up
4246   // a list of work to do and do the work in a separate step so that we're
4247   // not adding formulae and register counts while we're searching.
4248   SmallVector<WorkItem, 32> WorkItems;
4249   SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
4250   for (const SCEV *Reg : Sequence) {
4251     const ImmMapTy &Imms = Map.find(Reg)->second;
4252 
4253     // It's not worthwhile looking for reuse if there's only one offset.
4254     if (Imms.size() == 1)
4255       continue;
4256 
4257     LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
4258                for (const auto &Entry
4259                     : Imms) dbgs()
4260                << ' ' << Entry.first;
4261                dbgs() << '\n');
4262 
4263     // Examine each offset.
4264     for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
4265          J != JE; ++J) {
4266       const SCEV *OrigReg = J->second;
4267 
4268       int64_t JImm = J->first;
4269       const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
4270 
4271       if (!isa<SCEVConstant>(OrigReg) &&
4272           UsedByIndicesMap[Reg].count() == 1) {
4273         LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
4274                           << '\n');
4275         continue;
4276       }
4277 
4278       // Conservatively examine offsets between this orig reg and a few
4279       // selected other orig regs.
4280       int64_t First = Imms.begin()->first;
4281       int64_t Last = std::prev(Imms.end())->first;
4282       // Compute (First + Last) / 2 without overflow using the fact that
4283       // First + Last = 2 * (First & Last) + (First ^ Last).
4284       int64_t Avg = (First & Last) + ((First ^ Last) >> 1);
4285       // If the result is negative and First is odd and Last even (or vice versa),
4286       // we rounded towards -inf. Add 1 in that case, to round towards 0.
4287       Avg = Avg + ((First ^ Last) & ((uint64_t)Avg >> 63));
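           // For example, First = -5, Last = 2: (First & Last) == 2 and
           // (First ^ Last) >> 1 == -4, so Avg == -2; Avg is negative and the
           // parities differ, so the correction adds 1: Avg == -1 == trunc(-1.5).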
4288       ImmMapTy::const_iterator OtherImms[] = {
4289           Imms.begin(), std::prev(Imms.end()),
4290          Imms.lower_bound(Avg)};
4291       for (const auto &M : OtherImms) {
4292         if (M == J || M == JE) continue;
4293 
4294         // Compute the difference between the two.
4295         int64_t Imm = (uint64_t)JImm - M->first;
4296         for (unsigned LUIdx : UsedByIndices.set_bits())
4297           // Make a memo of this use, offset, and register tuple.
4298           if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
4299             WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
4300       }
4301     }
4302   }
4303 
4304   Map.clear();
4305   Sequence.clear();
4306   UsedByIndicesMap.clear();
4307   UniqueItems.clear();
4308 
4309   // Now iterate through the worklist and add new formulae.
4310   for (const WorkItem &WI : WorkItems) {
4311     size_t LUIdx = WI.LUIdx;
4312     LSRUse &LU = Uses[LUIdx];
4313     int64_t Imm = WI.Imm;
4314     const SCEV *OrigReg = WI.OrigReg;
4315 
4316     Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
4317     const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
4318     unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
4319 
4320     // TODO: Use a more targeted data structure.
4321     for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
4322       Formula F = LU.Formulae[L];
4323       // FIXME: The code for the scaled and unscaled registers looks
4324       // very similar but slightly different. Investigate if they
4325       // could be merged. That way, we would not have to unscale the
4326       // Formula.
4327       F.unscale();
4328       // Use the immediate in the scaled register.
4329       if (F.ScaledReg == OrigReg) {
4330         int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
4331         // Don't create 50 + reg(-50).
4332         if (F.referencesReg(SE.getSCEV(
4333                    ConstantInt::get(IntTy, -(uint64_t)Offset))))
4334           continue;
4335         Formula NewF = F;
4336         NewF.BaseOffset = Offset;
4337         if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
4338                         NewF))
4339           continue;
4340         NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
4341 
4342         // If the new scale is a constant in a register, and adding the constant
4343         // value to the immediate would produce a value closer to zero than the
4344         // immediate itself, then the formula isn't worthwhile.
4345         if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
4346           if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) &&
4347               (C->getAPInt().abs() * APInt(BitWidth, F.Scale))
4348                   .ule(std::abs(NewF.BaseOffset)))
4349             continue;
4350 
4351         // OK, looks good.
4352         NewF.canonicalize(*this->L);
4353         (void)InsertFormula(LU, LUIdx, NewF);
4354       } else {
4355         // Use the immediate in a base register.
4356         for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
4357           const SCEV *BaseReg = F.BaseRegs[N];
4358           if (BaseReg != OrigReg)
4359             continue;
4360           Formula NewF = F;
4361           NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
4362           if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
4363                           LU.Kind, LU.AccessTy, NewF)) {
4364             if (AMK == TTI::AMK_PostIndexed &&
4365                 mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
4366               continue;
4367             if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
4368               continue;
4369             NewF = F;
4370             NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
4371           }
4372           NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
4373 
4374           // If the new formula has a constant in a register, and adding the
4375           // constant value to the immediate would produce a value closer to
4376           // zero than the immediate itself, then the formula isn't worthwhile.
4377           for (const SCEV *NewReg : NewF.BaseRegs)
4378             if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg))
4379               if ((C->getAPInt() + NewF.BaseOffset)
4380                       .abs()
4381                       .slt(std::abs(NewF.BaseOffset)) &&
4382                   (C->getAPInt() + NewF.BaseOffset).countTrailingZeros() >=
4383                       countTrailingZeros<uint64_t>(NewF.BaseOffset))
4384                 goto skip_formula;
4385 
4386           // Ok, looks good.
4387           NewF.canonicalize(*this->L);
4388           (void)InsertFormula(LU, LUIdx, NewF);
4389           break;
4390         skip_formula:;
4391         }
4392       }
4393     }
4394   }
4395 }
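     // Note the two-phase structure above: WorkItems are recorded while only
     // reading Uses/RegUses, and applied after iteration finishes. A minimal
     // sketch of the same pattern (shouldRewrite, makeItem and applyItem are
     // hypothetical helpers, shown only to illustrate the shape):
     //
     //   SmallVector<WorkItem, 32> Work;
     //   for (const SCEV *Reg : RegUses)      // search phase: read-only
     //     if (shouldRewrite(Reg))
     //       Work.push_back(makeItem(Reg));   // defer the modification
     //   for (const WorkItem &WI : Work)      // apply phase: now it is safe
     //     applyItem(WI);                     // to add formulae and registers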
4396 
4397 /// Generate formulae for each use.
4398 void
4399 LSRInstance::GenerateAllReuseFormulae() {
4400   // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
4401   // queries are more precise.
4402   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4403     LSRUse &LU = Uses[LUIdx];
4404     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4405       GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
4406     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4407       GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
4408   }
4409   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4410     LSRUse &LU = Uses[LUIdx];
4411     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4412       GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
4413     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4414       GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
4415     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4416       GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
4417     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4418       GenerateScales(LU, LUIdx, LU.Formulae[i]);
4419   }
4420   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4421     LSRUse &LU = Uses[LUIdx];
4422     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4423       GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
4424   }
4425 
4426   GenerateCrossUseConstantOffsets();
4427 
4428   LLVM_DEBUG(dbgs() << "\n"
4429                        "After generating reuse formulae:\n";
4430              print_uses(dbgs()));
4431 }
4432 
4433 /// If there are multiple formulae with the same set of registers used
4434 /// by other uses, pick the best one and delete the others.
4435 void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
4436   DenseSet<const SCEV *> VisitedRegs;
4437   SmallPtrSet<const SCEV *, 16> Regs;
4438   SmallPtrSet<const SCEV *, 16> LoserRegs;
4439 #ifndef NDEBUG
4440   bool ChangedFormulae = false;
4441 #endif
4442 
4443   // Collect the best formula for each unique set of shared registers. This
4444   // is reset for each use.
4445   using BestFormulaeTy =
4446       DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>;
4447 
4448   BestFormulaeTy BestFormulae;
4449 
4450   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4451     LSRUse &LU = Uses[LUIdx];
4452     LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
4453                dbgs() << '\n');
4454 
4455     bool Any = false;
4456     for (size_t FIdx = 0, NumForms = LU.Formulae.size();
4457          FIdx != NumForms; ++FIdx) {
4458       Formula &F = LU.Formulae[FIdx];
4459 
4460       // Some formulas are instant losers. For example, they may depend on
4461       // nonexistent AddRecs from other loops. These need to be filtered
4462       // immediately, otherwise heuristics could choose them over others leading
4463       // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
4464       // avoids the need to recompute this information across formulae using the
4465       // same bad AddRec. Passing LoserRegs is also essential unless we remove
4466       // the corresponding bad register from the Regs set.
4467       Cost CostF(L, SE, TTI, AMK);
4468       Regs.clear();
4469       CostF.RateFormula(F, Regs, VisitedRegs, LU, &LoserRegs);
4470       if (CostF.isLoser()) {
4471         // During initial formula generation, undesirable formulae are generated
4472         // by uses within other loops that have some non-trivial address mode or
4473         // use the postinc form of the IV. LSR needs to provide these formulae
4474         // as the basis of rediscovering the desired formula that uses an AddRec
4475         // corresponding to the existing phi. Once all formulae have been
4476         // generated, these initial losers may be pruned.
4477         LLVM_DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs());
4478                    dbgs() << "\n");
4479       }
4480       else {
4481         SmallVector<const SCEV *, 4> Key;
4482         for (const SCEV *Reg : F.BaseRegs) {
4483           if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
4484             Key.push_back(Reg);
4485         }
4486         if (F.ScaledReg &&
4487             RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
4488           Key.push_back(F.ScaledReg);
4489         // An unstable sort by host pointer order is OK, because this is only
4490         // used for uniquifying.
4491         llvm::sort(Key);
4492 
4493         std::pair<BestFormulaeTy::const_iterator, bool> P =
4494           BestFormulae.insert(std::make_pair(Key, FIdx));
4495         if (P.second)
4496           continue;
4497 
4498         Formula &Best = LU.Formulae[P.first->second];
4499 
4500         Cost CostBest(L, SE, TTI, AMK);
4501         Regs.clear();
4502         CostBest.RateFormula(Best, Regs, VisitedRegs, LU);
4503         if (CostF.isLess(CostBest))
4504           std::swap(F, Best);
4505         LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
4506                    dbgs() << "\n"
4507                              "    in favor of formula ";
4508                    Best.print(dbgs()); dbgs() << '\n');
4509       }
4510 #ifndef NDEBUG
4511       ChangedFormulae = true;
4512 #endif
4513       LU.DeleteFormula(F);
4514       --FIdx;
4515       --NumForms;
4516       Any = true;
4517     }
4518 
4519     // Now that we've filtered out some formulae, recompute the Regs set.
4520     if (Any)
4521       LU.RecomputeRegs(LUIdx, RegUses);
4522 
4523     // Reset this to prepare for the next use.
4524     BestFormulae.clear();
4525   }
4526 
4527   LLVM_DEBUG(if (ChangedFormulae) {
4528     dbgs() << "\n"
4529               "After filtering out undesirable candidates:\n";
4530     print_uses(dbgs());
4531   });
4532 }
4533 
4534 /// Estimate the worst-case number of solutions the solver might have to
4535 /// consider. It almost never considers this many solutions because it prunes the
4536 /// search space, but the pruning isn't always sufficient.
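     /// For example, three uses with 10, 20, and 30 formulae each would give
     /// 10 * 20 * 30 = 6000 candidate solutions before any pruning.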
4537 size_t LSRInstance::EstimateSearchSpaceComplexity() const {
4538   size_t Power = 1;
4539   for (const LSRUse &LU : Uses) {
4540     size_t FSize = LU.Formulae.size();
4541     if (FSize >= ComplexityLimit) {
4542       Power = ComplexityLimit;
4543       break;
4544     }
4545     Power *= FSize;
4546     if (Power >= ComplexityLimit)
4547       break;
4548   }
4549   return Power;
4550 }
4551 
4552 /// When one formula uses a superset of the registers of another formula, it
4553 /// won't help reduce register pressure (though it may not necessarily hurt
4554 /// register pressure); remove it to simplify the system.
4555 void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
4556   if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4557     LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4558 
4559     LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
4560                          "which use a superset of registers used by other "
4561                          "formulae.\n");
4562 
4563     for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4564       LSRUse &LU = Uses[LUIdx];
4565       bool Any = false;
4566       for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4567         Formula &F = LU.Formulae[i];
4568         // Look for a formula with a constant or GV in a register. If the use
4569         // also has a formula with that same value in an immediate field,
4570         // delete the one that uses a register.
4571         for (SmallVectorImpl<const SCEV *>::const_iterator
4572              I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
4573           if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
4574             Formula NewF = F;
4575             //FIXME: Formulas should store bitwidth to do wrapping properly.
4576             //       See PR41034.
4577             NewF.BaseOffset += (uint64_t)C->getValue()->getSExtValue();
4578             NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
4579                                 (I - F.BaseRegs.begin()));
4580             if (LU.HasFormulaWithSameRegs(NewF)) {
4581               LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
4582                          dbgs() << '\n');
4583               LU.DeleteFormula(F);
4584               --i;
4585               --e;
4586               Any = true;
4587               break;
4588             }
4589           } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
4590             if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
4591               if (!F.BaseGV) {
4592                 Formula NewF = F;
4593                 NewF.BaseGV = GV;
4594                 NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
4595                                     (I - F.BaseRegs.begin()));
4596                 if (LU.HasFormulaWithSameRegs(NewF)) {
4597                   LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
4598                              dbgs() << '\n');
4599                   LU.DeleteFormula(F);
4600                   --i;
4601                   --e;
4602                   Any = true;
4603                   break;
4604                 }
4605               }
4606           }
4607         }
4608       }
4609       if (Any)
4610         LU.RecomputeRegs(LUIdx, RegUses);
4611     }
4612 
4613     LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4614   }
4615 }
4616 
4617 /// When there are many registers for expressions like A, A+1, A+2, etc.,
4618 /// allocate a single register for them.
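     /// This is common after unrolling: uses at p, p+4, p+8, ... can all be
     /// folded onto the use of p, with each fixup absorbing its distance from p
     /// as a constant offset.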
4619 void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
4620   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4621     return;
4622 
4623   LLVM_DEBUG(
4624       dbgs() << "The search space is too complex.\n"
4625                 "Narrowing the search space by assuming that uses separated "
4626                 "by a constant offset will use the same registers.\n");
4627 
4628   // This is especially useful for unrolled loops.
4629 
4630   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4631     LSRUse &LU = Uses[LUIdx];
4632     for (const Formula &F : LU.Formulae) {
4633       if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
4634         continue;
4635 
4636       LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
4637       if (!LUThatHas)
4638         continue;
4639 
4640       if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
4641                               LU.Kind, LU.AccessTy))
4642         continue;
4643 
4644       LLVM_DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs()); dbgs() << '\n');
4645 
4646       LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
4647 
4648       // Transfer the fixups of LU to LUThatHas.
4649       for (LSRFixup &Fixup : LU.Fixups) {
4650         Fixup.Offset += F.BaseOffset;
4651         LUThatHas->pushFixup(Fixup);
4652         LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
4653       }
4654 
4655       // Delete formulae from the new use which are no longer legal.
4656       bool Any = false;
4657       for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
4658         Formula &F = LUThatHas->Formulae[i];
4659         if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
4660                         LUThatHas->Kind, LUThatHas->AccessTy, F)) {
4661           LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
4662           LUThatHas->DeleteFormula(F);
4663           --i;
4664           --e;
4665           Any = true;
4666         }
4667       }
4668 
4669       if (Any)
4670         LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
4671 
4672       // Delete the old use.
4673       DeleteUse(LU, LUIdx);
4674       --LUIdx;
4675       --NumUses;
4676       break;
4677     }
4678   }
4679 
4680   LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4681 }
4682 
4683 /// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
4684 /// we've done more filtering, as it may be able to find more formulae to
4685 /// eliminate.
4686 void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
4687   if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4688     LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4689 
4690     LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
4691                          "undesirable dedicated registers.\n");
4692 
4693     FilterOutUndesirableDedicatedRegisters();
4694 
4695     LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4696   }
4697 }
4698 
4699 /// If an LSRUse has multiple formulae with the same ScaledReg and Scale,
4700 /// pick the best one and delete the others.
4701 /// This narrowing heuristic keeps as many formulae with different
4702 /// Scale and ScaledReg pairs as possible while narrowing the search space.
4703 /// The benefit is that a formulae set with more Scale and ScaledReg
4704 /// variations is more likely to yield a better solution than one where
4705 /// they are all the same. The picking-winner-reg heuristic often keeps
4706 /// the formulae with the same Scale and ScaledReg and filters out the
4707 /// others, and we want to avoid that if possible.
4708 void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
4709   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4710     return;
4711 
4712   LLVM_DEBUG(
4713       dbgs() << "The search space is too complex.\n"
4714                 "Narrowing the search space by choosing the best Formula "
4715                 "from the Formulae with the same Scale and ScaledReg.\n");
4716 
4717   // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse.
4718   using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;
4719 
4720   BestFormulaeTy BestFormulae;
4721 #ifndef NDEBUG
4722   bool ChangedFormulae = false;
4723 #endif
4724   DenseSet<const SCEV *> VisitedRegs;
4725   SmallPtrSet<const SCEV *, 16> Regs;
4726 
4727   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4728     LSRUse &LU = Uses[LUIdx];
4729     LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
4730                dbgs() << '\n');
4731 
4732     // Return true if Formula FA is better than Formula FB.
4733     auto IsBetterThan = [&](Formula &FA, Formula &FB) {
4734       // First we will try to choose the Formula with fewer new registers.
4735       // For a register used by current Formula, the more the register is
4736       // shared among LSRUses, the less we increase the register number
4737       // counter of the formula.
4738       size_t FARegNum = 0;
4739       for (const SCEV *Reg : FA.BaseRegs) {
4740         const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
4741         FARegNum += (NumUses - UsedByIndices.count() + 1);
4742       }
4743       size_t FBRegNum = 0;
4744       for (const SCEV *Reg : FB.BaseRegs) {
4745         const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
4746         FBRegNum += (NumUses - UsedByIndices.count() + 1);
4747       }
4748       if (FARegNum != FBRegNum)
4749         return FARegNum < FBRegNum;
4750 
4751       // If the new register numbers are the same, choose the Formula with
4752       // less Cost.
4753       Cost CostFA(L, SE, TTI, AMK);
4754       Cost CostFB(L, SE, TTI, AMK);
4755       Regs.clear();
4756       CostFA.RateFormula(FA, Regs, VisitedRegs, LU);
4757       Regs.clear();
4758       CostFB.RateFormula(FB, Regs, VisitedRegs, LU);
4759       return CostFA.isLess(CostFB);
4760     };
4761 
4762     bool Any = false;
4763     for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
4764          ++FIdx) {
4765       Formula &F = LU.Formulae[FIdx];
4766       if (!F.ScaledReg)
4767         continue;
4768       auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
4769       if (P.second)
4770         continue;
4771 
4772       Formula &Best = LU.Formulae[P.first->second];
4773       if (IsBetterThan(F, Best))
4774         std::swap(F, Best);
4775       LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
4776                  dbgs() << "\n"
4777                            "    in favor of formula ";
4778                  Best.print(dbgs()); dbgs() << '\n');
4779 #ifndef NDEBUG
4780       ChangedFormulae = true;
4781 #endif
4782       LU.DeleteFormula(F);
4783       --FIdx;
4784       --NumForms;
4785       Any = true;
4786     }
4787     if (Any)
4788       LU.RecomputeRegs(LUIdx, RegUses);
4789 
4790     // Reset this to prepare for the next use.
4791     BestFormulae.clear();
4792   }
4793 
4794   LLVM_DEBUG(if (ChangedFormulae) {
4795     dbgs() << "\n"
4796               "After filtering out undesirable candidates:\n";
4797     print_uses(dbgs());
4798   });
4799 }
4800 
4801 /// If we are over the complexity limit, restrict post-inc-preferring uses
4802 /// to only their lowest-register formulae.
4803 void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
4804   if (AMK != TTI::AMK_PostIndexed)
4805     return;
4806   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4807     return;
4808 
4809   LLVM_DEBUG(dbgs() << "The search space is too complex.\n"
4810                        "Narrowing the search space by choosing the lowest "
4811                        "register Formula for PostInc Uses.\n");
4812 
4813   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4814     LSRUse &LU = Uses[LUIdx];
4815 
4816     if (LU.Kind != LSRUse::Address)
4817       continue;
4818     if (!TTI.isIndexedLoadLegal(TTI.MIM_PostInc, LU.AccessTy.getType()) &&
4819         !TTI.isIndexedStoreLegal(TTI.MIM_PostInc, LU.AccessTy.getType()))
4820       continue;
4821 
4822     size_t MinRegs = std::numeric_limits<size_t>::max();
4823     for (const Formula &F : LU.Formulae)
4824       MinRegs = std::min(F.getNumRegs(), MinRegs);
4825 
4826     bool Any = false;
4827     for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
4828          ++FIdx) {
4829       Formula &F = LU.Formulae[FIdx];
4830       if (F.getNumRegs() > MinRegs) {
4831         LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
4832                    dbgs() << "\n");
4833         LU.DeleteFormula(F);
4834         --FIdx;
4835         --NumForms;
4836         Any = true;
4837       }
4838     }
4839     if (Any)
4840       LU.RecomputeRegs(LUIdx, RegUses);
4841 
4842     if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4843       break;
4844   }
4845 
4846   LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4847 }
4848 
4849 /// This function deletes formulas with a high expected register count.
4850 /// Assuming we don't know the value of each formula (having already deleted
4851 /// all the inefficient ones), compute the probability of not selecting each
4852 /// register.
4853 /// For example,
4854 /// Use1:
4855 ///  reg(a) + reg({0,+,1})
4856 ///  reg(a) + reg({-1,+,1}) + 1
4857 ///  reg({a,+,1})
4858 /// Use2:
4859 ///  reg(b) + reg({0,+,1})
4860 ///  reg(b) + reg({-1,+,1}) + 1
4861 ///  reg({b,+,1})
4862 /// Use3:
4863 ///  reg(c) + reg(b) + reg({0,+,1})
4864 ///  reg(c) + reg({b,+,1})
4865 ///
4866 /// Probability of not selecting
4867 ///                 Use1   Use2    Use3
4868 /// reg(a)         (1/3) *   1   *   1
4869 /// reg(b)           1   * (1/3) * (1/2)
4870 /// reg({0,+,1})   (2/3) * (2/3) * (1/2)
4871 /// reg({-1,+,1})  (2/3) * (2/3) *   1
4872 /// reg({a,+,1})   (2/3) *   1   *   1
4873 /// reg({b,+,1})     1   * (2/3) * (2/3)
4874 /// reg(c)           1   *   1   *   0
4875 ///
4876 /// Now compute the mathematical expectation of the register count for each
4877 /// formula. Note that for each use we exclude the probability of not
4878 /// selecting within that use. For example, for Use1 the probability for
4879 /// reg(a) would be just 1 * 1 (excluding the 1/3 probability of not selecting).
4880 /// Use1:
4881 ///  reg(a) + reg({0,+,1})          1 + 1/3       -- to be deleted
4882 ///  reg(a) + reg({-1,+,1}) + 1     1 + 4/9       -- to be deleted
4883 ///  reg({a,+,1})                   1
4884 /// Use2:
4885 ///  reg(b) + reg({0,+,1})          1/2 + 1/3     -- to be deleted
4886 ///  reg(b) + reg({-1,+,1}) + 1     1/2 + 2/3     -- to be deleted
4887 ///  reg({b,+,1})                   2/3
4888 /// Use3:
4889 ///  reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted
4890 ///  reg(c) + reg({b,+,1})          1 + 2/3
4891 void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
4892   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4893     return;
4894   // Ok, we have too many formulae on our hands to conveniently handle.
4895   // Use a rough heuristic to thin out the list.
4896 
4897   // Set of Regs which will be 100% used in the final solution.
4898   // Used in each formula of a solution (in the example above this is reg(c)).
4899   // We can skip them in calculations.
4900   SmallPtrSet<const SCEV *, 4> UniqRegs;
4901   LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4902 
4903   // Map each register to the probability of it not being selected.
4904   DenseMap <const SCEV *, float> RegNumMap;
4905   for (const SCEV *Reg : RegUses) {
4906     if (UniqRegs.count(Reg))
4907       continue;
4908     float PNotSel = 1;
4909     for (const LSRUse &LU : Uses) {
4910       if (!LU.Regs.count(Reg))
4911         continue;
4912       float P = LU.getNotSelectedProbability(Reg);
4913       if (P != 0.0)
4914         PNotSel *= P;
4915       else
4916         UniqRegs.insert(Reg);
4917     }
4918     RegNumMap.insert(std::make_pair(Reg, PNotSel));
4919   }
4920 
4921   LLVM_DEBUG(
4922       dbgs() << "Narrowing the search space by deleting costly formulas\n");
4923 
4924   // Delete formulas whose expected register count is high.
4925   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4926     LSRUse &LU = Uses[LUIdx];
4927     // If nothing to delete - continue.
4928     if (LU.Formulae.size() < 2)
4929       continue;
4930     // This is a temporary solution to test performance. Float should be
4931     // replaced with a rounding-independent type (based on integers) to avoid
4932     // different results for different target builds.
4933     float FMinRegNum = LU.Formulae[0].getNumRegs();
4934     float FMinARegNum = LU.Formulae[0].getNumRegs();
4935     size_t MinIdx = 0;
4936     for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4937       Formula &F = LU.Formulae[i];
4938       float FRegNum = 0;
4939       float FARegNum = 0;
4940       for (const SCEV *BaseReg : F.BaseRegs) {
4941         if (UniqRegs.count(BaseReg))
4942           continue;
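             // RegNumMap[BaseReg] is the probability that no use selects BaseReg;
             // dividing out this use's own not-selected probability leaves the
             // probability that no *other* use selects it, i.e. the chance that
             // this formula would bring BaseReg in as a brand-new register.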
4943         FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
4944         if (isa<SCEVAddRecExpr>(BaseReg))
4945           FARegNum +=
4946               RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
4947       }
4948       if (const SCEV *ScaledReg = F.ScaledReg) {
4949         if (!UniqRegs.count(ScaledReg)) {
4950           FRegNum +=
4951               RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
4952           if (isa<SCEVAddRecExpr>(ScaledReg))
4953             FARegNum +=
4954                 RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
4955         }
4956       }
4957       if (FMinRegNum > FRegNum ||
4958           (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) {
4959         FMinRegNum = FRegNum;
4960         FMinARegNum = FARegNum;
4961         MinIdx = i;
4962       }
4963     }
4964     LLVM_DEBUG(dbgs() << "  The formula "; LU.Formulae[MinIdx].print(dbgs());
4965                dbgs() << " with min reg num " << FMinRegNum << '\n');
4966     if (MinIdx != 0)
4967       std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
4968     while (LU.Formulae.size() != 1) {
4969       LLVM_DEBUG(dbgs() << "  Deleting "; LU.Formulae.back().print(dbgs());
4970                  dbgs() << '\n');
4971       LU.Formulae.pop_back();
4972     }
4973     LU.RecomputeRegs(LUIdx, RegUses);
4974     assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
4975     Formula &F = LU.Formulae[0];
4976     LLVM_DEBUG(dbgs() << "  Leaving only "; F.print(dbgs()); dbgs() << '\n');
4977     // When we choose the formula, the regs become unique.
4978     UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
4979     if (F.ScaledReg)
4980       UniqRegs.insert(F.ScaledReg);
4981   }
4982   LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4983 }
4984 
4985 /// Pick a register which seems likely to be profitable, and then in any use
4986 /// which has any reference to that register, delete all formulae which do not
4987 /// reference that register.
4988 void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
4989   // With all other options exhausted, loop until the system is simple
4990   // enough to handle.
4991   SmallPtrSet<const SCEV *, 4> Taken;
4992   while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4993     // Ok, we have too many formulae on our hands to conveniently handle.
4994     // Use a rough heuristic to thin out the list.
4995     LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4996 
4997     // Pick the register which is used by the most LSRUses, which is likely
4998     // to be a good reuse register candidate.
4999     const SCEV *Best = nullptr;
5000     unsigned BestNum = 0;
5001     for (const SCEV *Reg : RegUses) {
5002       if (Taken.count(Reg))
5003         continue;
5004       if (!Best) {
5005         Best = Reg;
5006         BestNum = RegUses.getUsedByIndices(Reg).count();
5007       } else {
5008         unsigned Count = RegUses.getUsedByIndices(Reg).count();
5009         if (Count > BestNum) {
5010           Best = Reg;
5011           BestNum = Count;
5012         }
5013       }
5014     }
5015     assert(Best && "Failed to find best LSRUse candidate");
5016 
5017     LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
5018                       << " will yield profitable reuse.\n");
5019     Taken.insert(Best);
5020 
5021     // In any use with formulae which references this register, delete formulae
5022     // which don't reference it.
5023     for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
5024       LSRUse &LU = Uses[LUIdx];
5025       if (!LU.Regs.count(Best)) continue;
5026 
5027       bool Any = false;
5028       for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
5029         Formula &F = LU.Formulae[i];
5030         if (!F.referencesReg(Best)) {
5031           LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
5032           LU.DeleteFormula(F);
5033           --e;
5034           --i;
5035           Any = true;
5036           assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
5037           continue;
5038         }
5039       }
5040 
5041       if (Any)
5042         LU.RecomputeRegs(LUIdx, RegUses);
5043     }
5044 
5045     LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
5046   }
5047 }
5048 
5049 /// If there are an extraordinary number of formulae to choose from, use some
5050 /// rough heuristics to prune down the number of formulae. This keeps the main
5051 /// solver from taking an extraordinary amount of time in some worst-case
5052 /// scenarios.
5053 void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
5054   NarrowSearchSpaceByDetectingSupersets();
5055   NarrowSearchSpaceByCollapsingUnrolledCode();
5056   NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
5057   if (FilterSameScaledReg)
5058     NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
5059   NarrowSearchSpaceByFilterPostInc();
5060   if (LSRExpNarrow)
5061     NarrowSearchSpaceByDeletingCostlyFormulas();
5062   else
5063     NarrowSearchSpaceByPickingWinnerRegs();
5064 }
5065 
5066 /// This is the recursive solver.
5067 void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
5068                                Cost &SolutionCost,
5069                                SmallVectorImpl<const Formula *> &Workspace,
5070                                const Cost &CurCost,
5071                                const SmallPtrSet<const SCEV *, 16> &CurRegs,
5072                                DenseSet<const SCEV *> &VisitedRegs) const {
5073   // Some ideas:
5074   //  - prune more:
5075   //    - use more aggressive filtering
5076   //    - sort the formula so that the most profitable solutions are found first
5077   //    - sort the uses too
5078   //  - search faster:
5079   //    - don't compute a cost, and then compare. compare while computing a cost
5080   //      and bail early.
5081   //    - track register sets with SmallBitVector
5082 
5083   const LSRUse &LU = Uses[Workspace.size()];
5084 
5085   // If this use references any register that's already a part of the
5086   // in-progress solution, consider it a requirement that a formula must
5087   // reference that register in order to be considered. This prunes out
5088   // unprofitable searching.
5089   SmallSetVector<const SCEV *, 4> ReqRegs;
5090   for (const SCEV *S : CurRegs)
5091     if (LU.Regs.count(S))
5092       ReqRegs.insert(S);
5093 
5094   SmallPtrSet<const SCEV *, 16> NewRegs;
5095   Cost NewCost(L, SE, TTI, AMK);
5096   for (const Formula &F : LU.Formulae) {
5097     // Ignore formulae which may not be ideal in terms of register reuse of
5098     // ReqRegs.  The formula should use all required registers before
5099     // introducing new ones.
5100     // This can sometimes (notably when trying to favour postinc) lead to
5101     // sub-optimal decisions, which are best left to the cost modelling to
5102     // get correct.
5103     if (AMK != TTI::AMK_PostIndexed || LU.Kind != LSRUse::Address) {
5104       int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
5105       for (const SCEV *Reg : ReqRegs) {
5106         if ((F.ScaledReg && F.ScaledReg == Reg) ||
5107             is_contained(F.BaseRegs, Reg)) {
5108           --NumReqRegsToFind;
5109           if (NumReqRegsToFind == 0)
5110             break;
5111         }
5112       }
5113       if (NumReqRegsToFind != 0) {
5114         // If none of the formulae satisfied the required registers, then we could
5115         // clear ReqRegs and try again. Currently, we simply give up in this case.
5116         continue;
5117       }
5118     }
5119 
5120     // Evaluate the cost of the current formula. If it's already worse than
5121     // the current best, prune the search at that point.
5122     NewCost = CurCost;
5123     NewRegs = CurRegs;
5124     NewCost.RateFormula(F, NewRegs, VisitedRegs, LU);
5125     if (NewCost.isLess(SolutionCost)) {
5126       Workspace.push_back(&F);
5127       if (Workspace.size() != Uses.size()) {
5128         SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
5129                      NewRegs, VisitedRegs);
5130         if (F.getNumRegs() == 1 && Workspace.size() == 1)
5131           VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
5132       } else {
5133         LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
5134                    dbgs() << ".\nRegs:\n";
5135                    for (const SCEV *S : NewRegs) dbgs()
5136                       << "- " << *S << "\n";
5137                    dbgs() << '\n');
5138 
5139         SolutionCost = NewCost;
5140         Solution = Workspace;
5141       }
5142       Workspace.pop_back();
5143     }
5144   }
5145 }
5146 
5147 /// Choose one formula from each use. Return the results in the given Solution
5148 /// vector.
5149 void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
5150   SmallVector<const Formula *, 8> Workspace;
5151   Cost SolutionCost(L, SE, TTI, AMK);
5152   SolutionCost.Lose();
5153   Cost CurCost(L, SE, TTI, AMK);
5154   SmallPtrSet<const SCEV *, 16> CurRegs;
5155   DenseSet<const SCEV *> VisitedRegs;
5156   Workspace.reserve(Uses.size());
5157 
5158   // SolveRecurse does all the work.
5159   SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
5160                CurRegs, VisitedRegs);
5161   if (Solution.empty()) {
5162     LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
5163     return;
5164   }
5165 
5166   // Ok, we've now made all our decisions.
5167   LLVM_DEBUG(dbgs() << "\n"
5168                        "The chosen solution requires ";
5169              SolutionCost.print(dbgs()); dbgs() << ":\n";
5170              for (size_t i = 0, e = Uses.size(); i != e; ++i) {
5171                dbgs() << "  ";
5172                Uses[i].print(dbgs());
5173                dbgs() << "\n"
5174                          "    ";
5175                Solution[i]->print(dbgs());
5176                dbgs() << '\n';
5177              });
5178 
5179   assert(Solution.size() == Uses.size() && "Malformed solution!");
5180 
5181   if (BaselineCost.isLess(SolutionCost)) {
5182     LLVM_DEBUG(dbgs() << "The baseline solution requires ";
5183                BaselineCost.print(dbgs()); dbgs() << "\n");
5184     if (!AllowDropSolutionIfLessProfitable)
5185       LLVM_DEBUG(
5186           dbgs() << "Baseline is more profitable than chosen solution, "
5187                     "add option 'lsr-drop-solution' to drop LSR solution.\n");
5188     else {
5189       LLVM_DEBUG(dbgs() << "Baseline is more profitable than chosen "
5190                            "solution, dropping LSR solution.\n";);
5191       Solution.clear();
5192     }
5193   }
5194 }
5195 
5196 /// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as far as
5197 /// we can go while still being dominated by the input positions. This helps
5198 /// canonicalize the insert position, which encourages sharing.
5199 BasicBlock::iterator
5200 LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
5201                                  const SmallVectorImpl<Instruction *> &Inputs)
5202                                                                          const {
5203   Instruction *Tentative = &*IP;
5204   while (true) {
5205     bool AllDominate = true;
5206     Instruction *BetterPos = nullptr;
5207     // Don't bother attempting to insert before a catchswitch; its basic block
5208     // cannot contain other non-PHI instructions.
5209     if (isa<CatchSwitchInst>(Tentative))
5210       return IP;
5211 
5212     for (Instruction *Inst : Inputs) {
5213       if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
5214         AllDominate = false;
5215         break;
5216       }
5217       // Attempt to find an insert position in the middle of the block,
5218       // instead of at the end, so that it can be used for other expansions.
5219       if (Tentative->getParent() == Inst->getParent() &&
5220           (!BetterPos || !DT.dominates(Inst, BetterPos)))
5221         BetterPos = &*std::next(BasicBlock::iterator(Inst));
5222     }
5223     if (!AllDominate)
5224       break;
5225     if (BetterPos)
5226       IP = BetterPos->getIterator();
5227     else
5228       IP = Tentative->getIterator();
5229 
5230     const Loop *IPLoop = LI.getLoopFor(IP->getParent());
5231     unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
5232 
5233     BasicBlock *IDom;
5234     for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
5235       if (!Rung) return IP;
5236       Rung = Rung->getIDom();
5237       if (!Rung) return IP;
5238       IDom = Rung->getBlock();
5239 
5240       // Don't climb into a loop though.
5241       const Loop *IDomLoop = LI.getLoopFor(IDom);
5242       unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
5243       if (IDomDepth <= IPLoopDepth &&
5244           (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
5245         break;
5246     }
5247 
5248     Tentative = IDom->getTerminator();
5249   }
5250 
5251   return IP;
5252 }
5253 
5254 /// Determine an input position which will be dominated by the operands and
5255 /// which will dominate the result.
5256 BasicBlock::iterator LSRInstance::AdjustInsertPositionForExpand(
5257     BasicBlock::iterator LowestIP, const LSRFixup &LF, const LSRUse &LU) const {
5258   // Collect some instructions which must be dominated by the
5259   // expanding replacement. These must be dominated by any operands that
5260   // will be required in the expansion.
5261   SmallVector<Instruction *, 4> Inputs;
5262   if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
5263     Inputs.push_back(I);
5264   if (LU.Kind == LSRUse::ICmpZero)
5265     if (Instruction *I =
5266           dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
5267       Inputs.push_back(I);
5268   if (LF.PostIncLoops.count(L)) {
5269     if (LF.isUseFullyOutsideLoop(L))
5270       Inputs.push_back(L->getLoopLatch()->getTerminator());
5271     else
5272       Inputs.push_back(IVIncInsertPos);
5273   }
5274   // The expansion must also be dominated by the increment positions of any
5275   // loops for which it is using post-inc mode.
5276   for (const Loop *PIL : LF.PostIncLoops) {
5277     if (PIL == L) continue;
5278 
5279     // Be dominated by the loop exit.
5280     SmallVector<BasicBlock *, 4> ExitingBlocks;
5281     PIL->getExitingBlocks(ExitingBlocks);
5282     if (!ExitingBlocks.empty()) {
5283       BasicBlock *BB = ExitingBlocks[0];
5284       for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
5285         BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
5286       Inputs.push_back(BB->getTerminator());
5287     }
5288   }
5289 
5290   assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad()
5291          && !isa<DbgInfoIntrinsic>(LowestIP) &&
5292          "Insertion point must be a normal instruction");
5293 
5294   // Then, climb up the immediate dominator tree as far as we can go while
5295   // still being dominated by the input positions.
5296   BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);
5297 
5298   // Don't insert instructions before PHI nodes.
5299   while (isa<PHINode>(IP)) ++IP;
5300 
5301   // Ignore landingpad instructions.
5302   while (IP->isEHPad()) ++IP;
5303 
5304   // Ignore debug intrinsics.
5305   while (isa<DbgInfoIntrinsic>(IP)) ++IP;
5306 
5307   // Set IP below instructions recently inserted by SCEVExpander. This keeps the
5308   // IP consistent across expansions and allows the previously inserted
5309   // instructions to be reused by subsequent expansion.
5310   while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
5311     ++IP;
5312 
5313   return IP;
5314 }
5315 
5316 /// Emit instructions for the leading candidate expression for this LSRUse (this
5317 /// is called "expanding").
5318 Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
5319                            const Formula &F, BasicBlock::iterator IP,
5320                            SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5321   if (LU.RigidFormula)
5322     return LF.OperandValToReplace;
5323 
5324   // Determine an insertion position which will be dominated by the operands
5325   // and which will dominate the result.
5326   IP = AdjustInsertPositionForExpand(IP, LF, LU);
5327   Rewriter.setInsertPoint(&*IP);
5328 
5329   // Inform the Rewriter if we have a post-increment use, so that it can
5330   // perform an advantageous expansion.
5331   Rewriter.setPostInc(LF.PostIncLoops);
5332 
5333   // This is the type that the user actually needs.
5334   Type *OpTy = LF.OperandValToReplace->getType();
5335   // This will be the type that we'll initially expand to.
5336   Type *Ty = F.getType();
5337   if (!Ty)
5338     // No type known; just expand directly to the ultimate type.
5339     Ty = OpTy;
5340   else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
5341     // Expand directly to the ultimate type if it's the right size.
5342     Ty = OpTy;
5343   // This is the type to do integer arithmetic in.
5344   Type *IntTy = SE.getEffectiveSCEVType(Ty);
5345 
5346   // Build up a list of operands to add together to form the full base.
5347   SmallVector<const SCEV *, 8> Ops;
5348 
5349   // Expand the BaseRegs portion.
5350   for (const SCEV *Reg : F.BaseRegs) {
5351     assert(!Reg->isZero() && "Zero allocated in a base register!");
5352 
5353     // If we're expanding for a post-inc user, make the post-inc adjustment.
5354     Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE);
5355     Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr)));
5356   }
5357 
5358   // Expand the ScaledReg portion.
5359   Value *ICmpScaledV = nullptr;
5360   if (F.Scale != 0) {
5361     const SCEV *ScaledS = F.ScaledReg;
5362 
5363     // If we're expanding for a post-inc user, make the post-inc adjustment.
5364     PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
5365     ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE);
5366 
5367     if (LU.Kind == LSRUse::ICmpZero) {
5368       // Expand ScaledReg as if it were part of the base regs.
5369       if (F.Scale == 1)
5370         Ops.push_back(
5371             SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)));
5372       else {
5373         // An interesting way of "folding" with an icmp is to use a negated
5374         // scale, which we'll implement by inserting it into the other operand
5375         // of the icmp.
5376         assert(F.Scale == -1 &&
5377                "The only scale supported by ICmpZero uses is -1!");
5378         ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr);
5379       }
5380     } else {
5381       // Otherwise just expand the scaled register and an explicit scale,
5382       // which is expected to be matched as part of the address.
5383 
5384       // Flush the operand list to suppress SCEVExpander hoisting address
5385       // modes, unless the addressing mode will not be folded.
5386       if (!Ops.empty() && LU.Kind == LSRUse::Address &&
5387           isAMCompletelyFolded(TTI, LU, F)) {
5388         Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr);
5389         Ops.clear();
5390         Ops.push_back(SE.getUnknown(FullV));
5391       }
5392       ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr));
5393       if (F.Scale != 1)
5394         ScaledS =
5395             SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
5396       Ops.push_back(ScaledS);
5397     }
5398   }
5399 
5400   // Expand the GV portion.
5401   if (F.BaseGV) {
5402     // Flush the operand list to suppress SCEVExpander hoisting.
5403     if (!Ops.empty()) {
5404       Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), IntTy);
5405       Ops.clear();
5406       Ops.push_back(SE.getUnknown(FullV));
5407     }
5408     Ops.push_back(SE.getUnknown(F.BaseGV));
5409   }
5410 
5411   // Flush the operand list to suppress SCEVExpander hoisting of both folded and
5412   // unfolded offsets. LSR assumes they both live next to their uses.
5413   if (!Ops.empty()) {
5414     Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
5415     Ops.clear();
5416     Ops.push_back(SE.getUnknown(FullV));
5417   }
5418 
5419   // Expand the immediate portion.
5420   int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset;
5421   if (Offset != 0) {
5422     if (LU.Kind == LSRUse::ICmpZero) {
5423       // The other interesting way of "folding" with an ICmpZero is to use a
5424       // negated immediate.
5425       if (!ICmpScaledV)
5426         ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
5427       else {
5428         Ops.push_back(SE.getUnknown(ICmpScaledV));
5429         ICmpScaledV = ConstantInt::get(IntTy, Offset);
5430       }
5431     } else {
5432       // Just add the immediate values. These again are expected to be matched
5433       // as part of the address.
5434       Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
5435     }
5436   }
5437 
5438   // Expand the unfolded offset portion.
5439   int64_t UnfoldedOffset = F.UnfoldedOffset;
5440   if (UnfoldedOffset != 0) {
5441     // Just add the immediate values.
5442     Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
5443                                                        UnfoldedOffset)));
5444   }
5445 
5446   // Emit instructions summing all the operands.
5447   const SCEV *FullS = Ops.empty() ?
5448                       SE.getConstant(IntTy, 0) :
5449                       SE.getAddExpr(Ops);
5450   Value *FullV = Rewriter.expandCodeFor(FullS, Ty);
5451 
5452   // We're done expanding now, so reset the rewriter.
5453   Rewriter.clearPostInc();
5454 
5455   // An ICmpZero Formula represents an ICmp which we're handling as a
5456   // comparison against zero. Now that we've expanded an expression for that
5457   // form, update the ICmp's other operand.
5458   if (LU.Kind == LSRUse::ICmpZero) {
5459     ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
5460     if (auto *OperandIsInstr = dyn_cast<Instruction>(CI->getOperand(1)))
5461       DeadInsts.emplace_back(OperandIsInstr);
5462     assert(!F.BaseGV && "ICmp does not support folding a global value and "
5463                            "a scale at the same time!");
5464     if (F.Scale == -1) {
5465       if (ICmpScaledV->getType() != OpTy) {
5466         Instruction *Cast =
5467           CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
5468                                                    OpTy, false),
5469                            ICmpScaledV, OpTy, "tmp", CI);
5470         ICmpScaledV = Cast;
5471       }
5472       CI->setOperand(1, ICmpScaledV);
5473     } else {
5474       // A scale of 1 means that the scale has been expanded as part of the
5475       // base regs.
5476       assert((F.Scale == 0 || F.Scale == 1) &&
5477              "ICmpZero uses only support scales of 0 or 1 here; "
5478              "-1 is handled above!");
5479       Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
5480                                            -(uint64_t)Offset);
5481       if (C->getType() != OpTy)
5482         C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
5483                                                           OpTy, false),
5484                                   C, OpTy);
5485 
5486       CI->setOperand(1, C);
5487     }
5488   }
5489 
5490   return FullV;
5491 }
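
// A hedged sketch of a full expansion (names and constants hypothetical): for
// an Address use with the formula
//
//   reg(%a) + 4*reg(%iv) + @gv + 16
//
// the code above emits, roughly,
//
//   %scaled = mul i64 %iv, 4            ; ScaledReg with an explicit scale
//   %sum0   = add i64 %a, %scaled
//   %sum1   = add i64 %sum0, ptrtoint (ptr @gv to i64)
//   %full   = add i64 %sum1, 16         ; folded immediate portion
//
// with the exact instruction order, folding, and reuse left to SCEVExpander
// and the target's addressing-mode matching.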
5492 
5493 /// Helper for Rewrite. PHI nodes are special because the use of their operands
5494 /// effectively happens in their predecessor blocks, so the expression may need
5495 /// to be expanded in multiple places.
5496 void LSRInstance::RewriteForPHI(
5497     PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
5498     SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5499   DenseMap<BasicBlock *, Value *> Inserted;
5500   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5501     if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
5502       bool needUpdateFixups = false;
5503       BasicBlock *BB = PN->getIncomingBlock(i);
5504 
5505       // If this is a critical edge, split the edge so that we do not insert
5506       // the code on all predecessor/successor paths.  We do this unless this
5507       // is the canonical backedge for this loop, which complicates post-inc
5508       // users.
5509       if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
5510           !isa<IndirectBrInst>(BB->getTerminator()) &&
5511           !isa<CatchSwitchInst>(BB->getTerminator())) {
5512         BasicBlock *Parent = PN->getParent();
5513         Loop *PNLoop = LI.getLoopFor(Parent);
5514         if (!PNLoop || Parent != PNLoop->getHeader()) {
5515           // Split the critical edge.
5516           BasicBlock *NewBB = nullptr;
5517           if (!Parent->isLandingPad()) {
5518             NewBB =
5519                 SplitCriticalEdge(BB, Parent,
5520                                   CriticalEdgeSplittingOptions(&DT, &LI, MSSAU)
5521                                       .setMergeIdenticalEdges()
5522                                       .setKeepOneInputPHIs());
5523           } else {
5524             SmallVector<BasicBlock*, 2> NewBBs;
5525             SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI);
5526             NewBB = NewBBs[0];
5527           }
5528           // If NewBB==NULL, then SplitCriticalEdge refused to split because all
5529           // phi predecessors are identical. The simple thing to do is skip
5530           // splitting in this case rather than complicate the API.
5531           if (NewBB) {
5532             // If PN is outside of the loop and BB is in the loop, we want to
5533             // move the block to be immediately before the PHI block, not
5534             // immediately after BB.
5535             if (L->contains(BB) && !L->contains(PN))
5536               NewBB->moveBefore(PN->getParent());
5537 
5538             // Splitting the edge can reduce the number of PHI entries we have.
5539             e = PN->getNumIncomingValues();
5540             BB = NewBB;
5541             i = PN->getBasicBlockIndex(BB);
5542 
5543             needUpdateFixups = true;
5544           }
5545         }
5546       }
5547 
5548       std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
5549         Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
5550       if (!Pair.second)
5551         PN->setIncomingValue(i, Pair.first->second);
5552       else {
5553         Value *FullV =
5554             Expand(LU, LF, F, BB->getTerminator()->getIterator(), DeadInsts);
5555 
5556         // If this is reuse-by-noop-cast, insert the noop cast.
5557         Type *OpTy = LF.OperandValToReplace->getType();
5558         if (FullV->getType() != OpTy)
5559           FullV =
5560             CastInst::Create(CastInst::getCastOpcode(FullV, false,
5561                                                      OpTy, false),
5562                              FullV, LF.OperandValToReplace->getType(),
5563                              "tmp", BB->getTerminator());
5564 
5565         PN->setIncomingValue(i, FullV);
5566         Pair.first->second = FullV;
5567       }
5568 
5569       // If LSR splits a critical edge and the phi node has other pending
5570       // fixup operands, we need to update those pending fixups. Otherwise
5571       // formulae will not be implemented completely and some instructions
5572       // will not be eliminated.
5573       if (needUpdateFixups) {
5574         for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
5575           for (LSRFixup &Fixup : Uses[LUIdx].Fixups)
5576             // If a fixup is supposed to rewrite some operand in the phi
5577             // that was just updated, that operand may already have been
5578             // moved to another phi node. Such a fixup requires an update.
5579             if (Fixup.UserInst == PN) {
5580               // Check if the operand we try to replace still exists in the
5581               // original phi.
5582               bool foundInOriginalPHI = false;
5583               for (const auto &val : PN->incoming_values())
5584                 if (val == Fixup.OperandValToReplace) {
5585                   foundInOriginalPHI = true;
5586                   break;
5587                 }
5588 
5589               // If the fixup operand was found in the original PHI, nothing to do.
5590               if (foundInOriginalPHI)
5591                 continue;
5592 
5593               // Otherwise it might have been moved to another PHI and require an
5594               // update. If the fixup operand is not found in any of the incoming
5595               // blocks, we have already rewritten it - nothing to do.
5596               for (const auto &Block : PN->blocks())
5597                 for (BasicBlock::iterator I = Block->begin(); isa<PHINode>(I);
5598                      ++I) {
5599                   PHINode *NewPN = cast<PHINode>(I);
5600                   for (const auto &val : NewPN->incoming_values())
5601                     if (val == Fixup.OperandValToReplace)
5602                       Fixup.UserInst = NewPN;
5603                 }
5604             }
5605       }
5606     }
5607 }
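
// A minimal sketch of the critical-edge case handled above (hypothetical IR):
//
//   bb:                                        ; has two successors
//     br i1 %c, label %merge, label %other
//   merge:                                     ; has several predecessors
//     %p = phi i64 [ %v, %bb ], [ %w, %bb2 ]
//
// The expansion replacing %v cannot simply go at the end of %bb, because it
// would then also execute on the path to %other. Splitting the edge gives it
// a home of its own:
//
//   bb.split:
//     %v.new = ...                             ; expansion lands here
//     br label %merge
//
// and the PHI entry is rewritten to [ %v.new, %bb.split ].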
5608 
5609 /// Emit instructions for the leading candidate expression for this LSRUse (this
5610 /// is called "expanding"), and update the UserInst to reference the newly
5611 /// expanded value.
5612 void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
5613                           const Formula &F,
5614                           SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5615   // First, find an insertion point that dominates UserInst. For PHI nodes,
5616   // find the nearest block which dominates all the relevant uses.
5617   if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
5618     RewriteForPHI(PN, LU, LF, F, DeadInsts);
5619   } else {
5620     Value *FullV = Expand(LU, LF, F, LF.UserInst->getIterator(), DeadInsts);
5621 
5622     // If this is reuse-by-noop-cast, insert the noop cast.
5623     Type *OpTy = LF.OperandValToReplace->getType();
5624     if (FullV->getType() != OpTy) {
5625       Instruction *Cast =
5626         CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
5627                          FullV, OpTy, "tmp", LF.UserInst);
5628       FullV = Cast;
5629     }
5630 
5631     // Update the user. ICmpZero is handled specially here (for now) because
5632     // Expand may have updated one of the operands of the icmp already, and
5633     // its new value may happen to be equal to LF.OperandValToReplace, in
5634     // which case doing replaceUsesOfWith leads to replacing both operands
5635     // with the same value. TODO: Reorganize this.
5636     if (LU.Kind == LSRUse::ICmpZero)
5637       LF.UserInst->setOperand(0, FullV);
5638     else
5639       LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
5640   }
5641 
5642   if (auto *OperandIsInstr = dyn_cast<Instruction>(LF.OperandValToReplace))
5643     DeadInsts.emplace_back(OperandIsInstr);
5644 }
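
// For instance (hypothetical types), if a formula was expanded as an i64 but
// the user expects a pointer operand, the reuse-by-noop-cast above inserts
//
//   %tmp = inttoptr i64 %full to ptr
//
// so the user can be rewritten without changing its type.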
5645 
5646 /// Rewrite all the fixup locations with new values, following the chosen
5647 /// solution.
5648 void LSRInstance::ImplementSolution(
5649     const SmallVectorImpl<const Formula *> &Solution) {
5650   // Keep track of instructions we may have made dead, so that
5651   // we can remove them after we are done working.
5652   SmallVector<WeakTrackingVH, 16> DeadInsts;
5653 
5654   Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
5655 
5656   // Mark phi nodes that terminate chains so the expander tries to reuse them.
5657   for (const IVChain &Chain : IVChainVec) {
5658     if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
5659       Rewriter.setChainedPhi(PN);
5660   }
5661 
5662   // Expand the new value definitions and update the users.
5663   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
5664     for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) {
5665       Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], DeadInsts);
5666       Changed = true;
5667     }
5668 
5669   for (const IVChain &Chain : IVChainVec) {
5670     GenerateIVChain(Chain, DeadInsts);
5671     Changed = true;
5672   }
5673 
5674   for (const WeakVH &IV : Rewriter.getInsertedIVs())
5675     if (IV && cast<Instruction>(&*IV)->getParent())
5676       ScalarEvolutionIVs.push_back(IV);
5677 
5678   // Clean up after ourselves. This must be done before deleting any
5679   // instructions.
5680   Rewriter.clear();
5681 
5682   Changed |= RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts,
5683                                                                   &TLI, MSSAU);
5684 
5685   // In our cost analysis above, we assume that each addrec consumes exactly
5686   // one register, and arrange to have increments inserted just before the
5687   // latch to maximize the chance this is true.  However, if we reused
5688   // existing IVs, we now need to move the increments to match our
5689   // expectations.  Otherwise, our cost modeling results in us having
5690   // chosen a non-optimal result for the actual schedule.  (And yes, this
5691   // scheduling decision does impact later codegen.)
5692   for (PHINode &PN : L->getHeader()->phis()) {
5693     BinaryOperator *BO = nullptr;
5694     Value *Start = nullptr, *Step = nullptr;
5695     if (!matchSimpleRecurrence(&PN, BO, Start, Step))
5696       continue;
5697 
5698     switch (BO->getOpcode()) {
5699     case Instruction::Sub:
5700       if (BO->getOperand(0) != &PN)
5701         // sub is non-commutative - match handling elsewhere in LSR
5702         continue;
5703       break;
5704     case Instruction::Add:
5705       break;
5706     default:
5707       continue;
5708     }
5709 
5710     if (!isa<Constant>(Step))
5711       // If not a constant step, might increase register pressure
5712       // (We assume constants have been canonicalized to RHS)
5713       continue;
5714 
5715     if (BO->getParent() == IVIncInsertPos->getParent())
5716       // Only bother moving across blocks.  Isel can handle block local case.
5717       continue;
5718 
5719     // Can we legally schedule inc at the desired point?
5720     if (!llvm::all_of(BO->uses(),
5721                       [&](Use &U) {return DT.dominates(IVIncInsertPos, U);}))
5722       continue;
5723     BO->moveBefore(IVIncInsertPos);
5724     Changed = true;
5725   }
5726 
5727 
5728 }
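
// A small sketch of the increment scheduling above (hypothetical IR): with a
// reused IV whose increment was left in the loop body,
//
//   body:
//     %iv.next = add i64 %iv, 8       ; not adjacent to the latch branch
//     ...
//   latch:
//     br i1 %done, label %exit, label %header
//
// the add is moved to IVIncInsertPos, just before the latch terminator, so
// the addrec really does occupy a single register across the body, matching
// the cost model's assumption.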
5729 
5730 LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
5731                          DominatorTree &DT, LoopInfo &LI,
5732                          const TargetTransformInfo &TTI, AssumptionCache &AC,
5733                          TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
5734     : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
5735       MSSAU(MSSAU), AMK(PreferredAddresingMode.getNumOccurrences() > 0
5736                             ? PreferredAddresingMode
5737                             : TTI.getPreferredAddressingMode(L, &SE)),
5738       Rewriter(SE, L->getHeader()->getModule()->getDataLayout(), "lsr", false),
5739       BaselineCost(L, SE, TTI, AMK) {
5740   // If LoopSimplify form is not available, stay out of trouble.
5741   if (!L->isLoopSimplifyForm())
5742     return;
5743 
5744   // If there's no interesting work to be done, bail early.
5745   if (IU.empty()) return;
5746 
5747   // If there's too much analysis to be done, bail early. We won't be able to
5748   // model the problem anyway.
5749   unsigned NumUsers = 0;
5750   for (const IVStrideUse &U : IU) {
5751     if (++NumUsers > MaxIVUsers) {
5752       (void)U;
5753       LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
5754                         << "\n");
5755       return;
5756     }
5757     // Bail out if we have a PHI on an EHPad that gets a value from a
5758     // CatchSwitchInst.  Because the CatchSwitchInst cannot be split, there is
5759     // no good place to stick any instructions.
5760     if (auto *PN = dyn_cast<PHINode>(U.getUser())) {
5761        auto *FirstNonPHI = PN->getParent()->getFirstNonPHI();
5762        if (isa<FuncletPadInst>(FirstNonPHI) ||
5763            isa<CatchSwitchInst>(FirstNonPHI))
5764          for (BasicBlock *PredBB : PN->blocks())
5765            if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI()))
5766              return;
5767     }
5768   }
5769 
5770   LLVM_DEBUG(dbgs() << "\nLSR on loop ";
5771              L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
5772              dbgs() << ":\n");
5773 
5774   // Configure SCEVExpander already now, so the correct mode is used for
5775   // isSafeToExpand() checks.
5776 #ifndef NDEBUG
5777   Rewriter.setDebugType(DEBUG_TYPE);
5778 #endif
5779   Rewriter.disableCanonicalMode();
5780   Rewriter.enableLSRMode();
5781 
5782   // First, perform some low-level loop optimizations.
5783   OptimizeShadowIV();
5784   OptimizeLoopTermCond();
5785 
5786   // If loop preparation eliminates all interesting IV users, bail.
5787   if (IU.empty()) return;
5788 
5789   // Skip nested loops until we can model them better with formulae.
5790   if (!L->isInnermost()) {
5791     LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
5792     return;
5793   }
5794 
5795   // Start collecting data and preparing for the solver.
5796   // If number of registers is not the major cost, we cannot benefit from the
5797   // current profitable chain optimization which is based on number of
5798   // registers.
5799   // FIXME: add profitable chain optimization for other kinds major cost, for
5800   // example number of instructions.
5801   if (TTI.isNumRegsMajorCostOfLSR() || StressIVChain)
5802     CollectChains();
5803   CollectInterestingTypesAndFactors();
5804   CollectFixupsAndInitialFormulae();
5805   CollectLoopInvariantFixupsAndFormulae();
5806 
5807   if (Uses.empty())
5808     return;
5809 
5810   LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
5811              print_uses(dbgs()));
5812 
5813   // Now use the reuse data to generate a bunch of interesting ways
5814   // to formulate the values needed for the uses.
5815   GenerateAllReuseFormulae();
5816 
5817   FilterOutUndesirableDedicatedRegisters();
5818   NarrowSearchSpaceUsingHeuristics();
5819 
5820   SmallVector<const Formula *, 8> Solution;
5821   Solve(Solution);
5822 
5823   // Release memory that is no longer needed.
5824   Factors.clear();
5825   Types.clear();
5826   RegUses.clear();
5827 
5828   if (Solution.empty())
5829     return;
5830 
5831 #ifndef NDEBUG
5832   // Formulae should be legal.
5833   for (const LSRUse &LU : Uses) {
5834     for (const Formula &F : LU.Formulae)
5835       assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
5836                         F) && "Illegal formula generated!");
5837   }
5838 #endif
5839 
5840   // Now that we've decided what we want, make it so.
5841   ImplementSolution(Solution);
5842 }
5843 
5844 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
5845 void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
5846   if (Factors.empty() && Types.empty()) return;
5847 
5848   OS << "LSR has identified the following interesting factors and types: ";
5849   bool First = true;
5850 
5851   for (int64_t Factor : Factors) {
5852     if (!First) OS << ", ";
5853     First = false;
5854     OS << '*' << Factor;
5855   }
5856 
5857   for (Type *Ty : Types) {
5858     if (!First) OS << ", ";
5859     First = false;
5860     OS << '(' << *Ty << ')';
5861   }
5862   OS << '\n';
5863 }
5864 
5865 void LSRInstance::print_fixups(raw_ostream &OS) const {
5866   OS << "LSR is examining the following fixup sites:\n";
5867   for (const LSRUse &LU : Uses)
5868     for (const LSRFixup &LF : LU.Fixups) {
5869       OS << "  ";
5870       LF.print(OS);
5871       OS << '\n';
5872     }
5873 }
5874 
5875 void LSRInstance::print_uses(raw_ostream &OS) const {
5876   OS << "LSR is examining the following uses:\n";
5877   for (const LSRUse &LU : Uses) {
5878     OS << "  ";
5879     LU.print(OS);
5880     OS << '\n';
5881     for (const Formula &F : LU.Formulae) {
5882       OS << "    ";
5883       F.print(OS);
5884       OS << '\n';
5885     }
5886   }
5887 }
5888 
5889 void LSRInstance::print(raw_ostream &OS) const {
5890   print_factors_and_types(OS);
5891   print_fixups(OS);
5892   print_uses(OS);
5893 }
5894 
5895 LLVM_DUMP_METHOD void LSRInstance::dump() const {
5896   print(errs()); errs() << '\n';
5897 }
5898 #endif
5899 
5900 namespace {
5901 
5902 class LoopStrengthReduce : public LoopPass {
5903 public:
5904   static char ID; // Pass ID, replacement for typeid
5905 
5906   LoopStrengthReduce();
5907 
5908 private:
5909   bool runOnLoop(Loop *L, LPPassManager &LPM) override;
5910   void getAnalysisUsage(AnalysisUsage &AU) const override;
5911 };
5912 
5913 } // end anonymous namespace
5914 
5915 LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
5916   initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
5917 }
5918 
5919 void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
5920   // We split critical edges, so we change the CFG.  However, we do update
5921   // many analyses if they are around.
5922   AU.addPreservedID(LoopSimplifyID);
5923 
5924   AU.addRequired<LoopInfoWrapperPass>();
5925   AU.addPreserved<LoopInfoWrapperPass>();
5926   AU.addRequiredID(LoopSimplifyID);
5927   AU.addRequired<DominatorTreeWrapperPass>();
5928   AU.addPreserved<DominatorTreeWrapperPass>();
5929   AU.addRequired<ScalarEvolutionWrapperPass>();
5930   AU.addPreserved<ScalarEvolutionWrapperPass>();
5931   AU.addRequired<AssumptionCacheTracker>();
5932   AU.addRequired<TargetLibraryInfoWrapperPass>();
5933   // Requiring LoopSimplify a second time here prevents IVUsers from running
5934   // twice, since LoopSimplify was invalidated by running ScalarEvolution.
5935   AU.addRequiredID(LoopSimplifyID);
5936   AU.addRequired<IVUsersWrapperPass>();
5937   AU.addPreserved<IVUsersWrapperPass>();
5938   AU.addRequired<TargetTransformInfoWrapperPass>();
5939   AU.addPreserved<MemorySSAWrapperPass>();
5940 }
5941 
5942 namespace {
5943 
5944 /// Enables more convenient iteration over a DWARF expression vector.
5945 static iterator_range<llvm::DIExpression::expr_op_iterator>
5946 ToDwarfOpIter(SmallVectorImpl<uint64_t> &Expr) {
5947   llvm::DIExpression::expr_op_iterator Begin =
5948       llvm::DIExpression::expr_op_iterator(Expr.begin());
5949   llvm::DIExpression::expr_op_iterator End =
5950       llvm::DIExpression::expr_op_iterator(Expr.end());
5951   return {Begin, End};
5952 }
5953 
5954 struct SCEVDbgValueBuilder {
5955   SCEVDbgValueBuilder() = default;
5956   SCEVDbgValueBuilder(const SCEVDbgValueBuilder &Base) { clone(Base); }
5957 
5958   void clone(const SCEVDbgValueBuilder &Base) {
5959     LocationOps = Base.LocationOps;
5960     Expr = Base.Expr;
5961   }
5962 
5963   void clear() {
5964     LocationOps.clear();
5965     Expr.clear();
5966   }
5967 
5968   /// The DIExpression as we translate the SCEV.
5969   SmallVector<uint64_t, 6> Expr;
5970   /// The location ops of the DIExpression.
5971   SmallVector<Value *, 2> LocationOps;
5972 
5973   void pushOperator(uint64_t Op) { Expr.push_back(Op); }
5974   void pushUInt(uint64_t Operand) { Expr.push_back(Operand); }
5975 
5976   /// Add a DW_OP_LLVM_arg to the expression, followed by the index of the value
5977   /// in the set of values referenced by the expression.
5978   void pushLocation(llvm::Value *V) {
5979     Expr.push_back(llvm::dwarf::DW_OP_LLVM_arg);
5980     auto *It = llvm::find(LocationOps, V);
5981     unsigned ArgIndex = 0;
5982     if (It != LocationOps.end()) {
5983       ArgIndex = std::distance(LocationOps.begin(), It);
5984     } else {
5985       ArgIndex = LocationOps.size();
5986       LocationOps.push_back(V);
5987     }
5988     Expr.push_back(ArgIndex);
5989   }
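
  // For example (hypothetical values), pushing %v twice and then %w yields
  //   Expr:        DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1
  //   LocationOps: [ %v, %w ]
  // i.e. repeated values are referenced by index rather than duplicated.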
5990 
5991   void pushValue(const SCEVUnknown *U) {
5992     llvm::Value *V = cast<SCEVUnknown>(U)->getValue();
5993     pushLocation(V);
5994   }
5995 
5996   bool pushConst(const SCEVConstant *C) {
5997     if (C->getAPInt().getMinSignedBits() > 64)
5998       return false;
5999     Expr.push_back(llvm::dwarf::DW_OP_consts);
6000     Expr.push_back(C->getAPInt().getSExtValue());
6001     return true;
6002   }
6003 
6004   // Iterating the expression as DWARF ops is convenient when updating
6005   // DW_OP_LLVM_arg operands.
6006   iterator_range<llvm::DIExpression::expr_op_iterator> expr_ops() {
6007     return ToDwarfOpIter(Expr);
6008   }
6009 
6010   /// Several SCEV types are sequences of the same arithmetic operator applied
6011   /// to constants and values that may be extended or truncated.
6012   bool pushArithmeticExpr(const llvm::SCEVCommutativeExpr *CommExpr,
6013                           uint64_t DwarfOp) {
6014     assert((isa<llvm::SCEVAddExpr>(CommExpr) || isa<SCEVMulExpr>(CommExpr)) &&
6015            "Expected arithmetic SCEV type");
6016     bool Success = true;
6017     unsigned EmitOperator = 0;
6018     for (const auto &Op : CommExpr->operands()) {
6019       Success &= pushSCEV(Op);
6020 
6021       if (EmitOperator >= 1)
6022         pushOperator(DwarfOp);
6023       ++EmitOperator;
6024     }
6025     return Success;
6026   }
6027 
6028   // TODO: Identify and omit noop casts.
6029   bool pushCast(const llvm::SCEVCastExpr *C, bool IsSigned) {
6030     const llvm::SCEV *Inner = C->getOperand(0);
6031     const llvm::Type *Type = C->getType();
6032     uint64_t ToWidth = Type->getIntegerBitWidth();
6033     bool Success = pushSCEV(Inner);
6034     uint64_t CastOps[] = {dwarf::DW_OP_LLVM_convert, ToWidth,
6035                           IsSigned ? llvm::dwarf::DW_ATE_signed
6036                                    : llvm::dwarf::DW_ATE_unsigned};
6037     for (const auto &Op : CastOps)
6038       pushOperator(Op);
6039     return Success;
6040   }
6041 
6042   // TODO: MinMax - although these haven't been encountered in the test suite.
6043   bool pushSCEV(const llvm::SCEV *S) {
6044     bool Success = true;
6045     if (const SCEVConstant *StartInt = dyn_cast<SCEVConstant>(S)) {
6046       Success &= pushConst(StartInt);
6047 
6048     } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6049       if (!U->getValue())
6050         return false;
6051       pushLocation(U->getValue());
6052 
6053     } else if (const SCEVMulExpr *MulRec = dyn_cast<SCEVMulExpr>(S)) {
6054       Success &= pushArithmeticExpr(MulRec, llvm::dwarf::DW_OP_mul);
6055 
6056     } else if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
6057       Success &= pushSCEV(UDiv->getLHS());
6058       Success &= pushSCEV(UDiv->getRHS());
6059       pushOperator(llvm::dwarf::DW_OP_div);
6060 
6061     } else if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(S)) {
6062       // Assert if a new and unknown SCEVCastExpr type is encountered.
6063       assert((isa<SCEVZeroExtendExpr>(Cast) || isa<SCEVTruncateExpr>(Cast) ||
6064               isa<SCEVPtrToIntExpr>(Cast) || isa<SCEVSignExtendExpr>(Cast)) &&
6065              "Unexpected cast type in SCEV.");
6066       Success &= pushCast(Cast, (isa<SCEVSignExtendExpr>(Cast)));
6067 
6068     } else if (const SCEVAddExpr *AddExpr = dyn_cast<SCEVAddExpr>(S)) {
6069       Success &= pushArithmeticExpr(AddExpr, llvm::dwarf::DW_OP_plus);
6070 
6071     } else if (isa<SCEVAddRecExpr>(S)) {
6072       // Nested SCEVAddRecExpr are generated by nested loops and are currently
6073       // unsupported.
6074       return false;
6075 
6076     } else {
6077       return false;
6078     }
6079     return Success;
6080   }
6081 
6082   /// Return true if the combination of arithmetic operator and underlying
6083   /// SCEV constant value is an identity function.
6084   bool isIdentityFunction(uint64_t Op, const SCEV *S) {
6085     if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
6086       if (C->getAPInt().getMinSignedBits() > 64)
6087         return false;
6088       int64_t I = C->getAPInt().getSExtValue();
6089       switch (Op) {
6090       case llvm::dwarf::DW_OP_plus:
6091       case llvm::dwarf::DW_OP_minus:
6092         return I == 0;
6093       case llvm::dwarf::DW_OP_mul:
6094       case llvm::dwarf::DW_OP_div:
6095         return I == 1;
6096       }
6097     }
6098     return false;
6099   }
6100 
6101   /// Convert a SCEV of a value to a DIExpression that is pushed onto the
6102   /// builder's expression stack. The stack should already contain an
6103   /// expression for the iteration count, so that it can be multiplied by
6104   /// the stride and added to the start.
6105   /// Components of the expression are omitted if they are an identity function.
6106   /// Chain (non-affine) SCEVs are not supported.
6107   bool SCEVToValueExpr(const llvm::SCEVAddRecExpr &SAR, ScalarEvolution &SE) {
6108     assert(SAR.isAffine() && "Expected affine SCEV");
6109     // TODO: Is this check needed?
6110     if (isa<SCEVAddRecExpr>(SAR.getStart()))
6111       return false;
6112 
6113     const SCEV *Start = SAR.getStart();
6114     const SCEV *Stride = SAR.getStepRecurrence(SE);
6115 
6116     // Skip pushing arithmetic noops.
6117     if (!isIdentityFunction(llvm::dwarf::DW_OP_mul, Stride)) {
6118       if (!pushSCEV(Stride))
6119         return false;
6120       pushOperator(llvm::dwarf::DW_OP_mul);
6121     }
6122     if (!isIdentityFunction(llvm::dwarf::DW_OP_plus, Start)) {
6123       if (!pushSCEV(Start))
6124         return false;
6125       pushOperator(llvm::dwarf::DW_OP_plus);
6126     }
6127     return true;
6128   }
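
  // As an illustration, for the affine SCEV {%start,+,4} with the iteration
  // count already on the expression stack, this pushes roughly
  //   DW_OP_consts 4, DW_OP_mul, DW_OP_LLVM_arg <start>, DW_OP_plus
  // computing start + itercount * stride; the multiply is omitted for a
  // stride of 1 and the add for a start of 0.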
6129 
6130   /// Create an expression that is an offset from a value (usually the IV).
6131   void createOffsetExpr(int64_t Offset, Value *OffsetValue) {
6132     pushLocation(OffsetValue);
6133     DIExpression::appendOffset(Expr, Offset);
6134     LLVM_DEBUG(
6135         dbgs() << "scev-salvage: Generated IV offset expression. Offset: "
6136                << std::to_string(Offset) << "\n");
6137   }
6138 
6139   /// Combine a translation of the SCEV and the IV to create an expression that
6140   /// recovers a location's value.
6141   /// Returns true if an expression was created.
6142   bool createIterCountExpr(const SCEV *S,
6143                            const SCEVDbgValueBuilder &IterationCount,
6144                            ScalarEvolution &SE) {
6145     // SCEVs for SSA values are most frequently of the form
6146     // {start,+,stride}, but sometimes they are ({start,+,stride} + %a + ..).
6147     // This is because %a is a PHI node that is not the IV. However, these
6148     // SCEVs have not been observed to result in debuginfo-lossy optimisations,
6149     // so it's not expected this point will be reached.
6150     if (!isa<SCEVAddRecExpr>(S))
6151       return false;
6152 
6153     LLVM_DEBUG(dbgs() << "scev-salvage: Location to salvage SCEV: " << *S
6154                       << '\n');
6155 
6156     const auto *Rec = cast<SCEVAddRecExpr>(S);
6157     if (!Rec->isAffine())
6158       return false;
6159 
6160     if (S->getExpressionSize() > MaxSCEVSalvageExpressionSize)
6161       return false;
6162 
6163     // Initialise a new builder with the iteration count expression. In
6164     // combination with the value's SCEV this enables recovery.
6165     clone(IterationCount);
6166     if (!SCEVToValueExpr(*Rec, SE))
6167       return false;
6168 
6169     return true;
6170   }
6171 
6172   /// Convert a SCEV of a value to a DIExpression that is pushed onto the
6173   /// builder's expression stack. The stack should already contain an
6174   /// expression for the iteration count, so that it can be multiplied by
6175   /// the stride and added to the start.
6176   /// Components of the expression are omitted if they are an identity function.
6177   bool SCEVToIterCountExpr(const llvm::SCEVAddRecExpr &SAR,
6178                            ScalarEvolution &SE) {
6179     assert(SAR.isAffine() && "Expected affine SCEV");
6180     if (isa<SCEVAddRecExpr>(SAR.getStart())) {
6181       LLVM_DEBUG(dbgs() << "scev-salvage: IV SCEV. Unsupported nested AddRec: "
6182                         << SAR << '\n');
6183       return false;
6184     }
6185     const SCEV *Start = SAR.getStart();
6186     const SCEV *Stride = SAR.getStepRecurrence(SE);
6187 
6188     // Skip pushing arithmetic noops.
6189     if (!isIdentityFunction(llvm::dwarf::DW_OP_minus, Start)) {
6190       if (!pushSCEV(Start))
6191         return false;
6192       pushOperator(llvm::dwarf::DW_OP_minus);
6193     }
6194     if (!isIdentityFunction(llvm::dwarf::DW_OP_div, Stride)) {
6195       if (!pushSCEV(Stride))
6196         return false;
6197       pushOperator(llvm::dwarf::DW_OP_div);
6198     }
6199     return true;
6200   }
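
  // As an illustration, for an IV with the affine SCEV {%start,+,4} and the
  // IV's current value already on the expression stack, this pushes roughly
  //   DW_OP_LLVM_arg <start>, DW_OP_minus, DW_OP_consts 4, DW_OP_div
  // recovering the iteration count as (IV - start) / stride; the subtraction
  // is omitted for a start of 0 and the division for a stride of 1.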
6201 
6202   // Append the current expression and locations to a location list and an
6203   // expression list. Modify the DW_OP_LLVM_arg indexes to account for
6204   // the locations already present in the destination list.
6205   void appendToVectors(SmallVectorImpl<uint64_t> &DestExpr,
6206                        SmallVectorImpl<Value *> &DestLocations) {
6207     assert(!DestLocations.empty() &&
6208            "Expected the locations vector to contain the IV");
6209     // The DW_OP_LLVM_arg arguments of the expression being appended must be
6210     // modified to account for the locations already in the destination vector.
6211     // All builders contain the IV as the first location op.
6212     assert(!LocationOps.empty() &&
6213            "Expected the location ops to contain the IV.");
6214     // DestIndexMap[n] contains the index in DestLocations for the nth
6215     // location in this SCEVDbgValueBuilder.
6216     SmallVector<uint64_t, 2> DestIndexMap;
6217     for (const auto &Op : LocationOps) {
6218       auto It = find(DestLocations, Op);
6219       if (It != DestLocations.end()) {
6220         // Location already exists in DestLocations, reuse existing ArgIndex.
6221         DestIndexMap.push_back(std::distance(DestLocations.begin(), It));
6222         continue;
6223       }
6224       // Location is not in DestLocations, add it.
6225       DestIndexMap.push_back(DestLocations.size());
6226       DestLocations.push_back(Op);
6227     }
6228 
6229     for (const auto &Op : expr_ops()) {
6230       if (Op.getOp() != dwarf::DW_OP_LLVM_arg) {
6231         Op.appendToVector(DestExpr);
6232         continue;
6233       }
6234 
6235       DestExpr.push_back(dwarf::DW_OP_LLVM_arg);
6236       // `DW_OP_LLVM_arg n` represents the nth LocationOp in this SCEV,
6237       // DestIndexMap[n] contains its new index in DestLocations.
6238       uint64_t NewIndex = DestIndexMap[Op.getArg(0)];
6239       DestExpr.push_back(NewIndex);
6240     }
6241   }
6242 };
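
  // For example, appending a builder whose LocationOps are [ %iv, %a ] to a
  // destination already holding [ %iv, %b ] reuses %iv at index 0, appends %a
  // at index 2, and rewrites the builder's "DW_OP_LLVM_arg 1" (which named %a)
  // to "DW_OP_LLVM_arg 2" in the merged expression.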
6243 
6244 /// Holds all the required data to salvage a dbg.value using the pre-LSR SCEVs
6245 /// and DIExpression.
6246 struct DVIRecoveryRec {
6247   DVIRecoveryRec(DbgValueInst *DbgValue)
6248       : DVI(DbgValue), Expr(DbgValue->getExpression()),
6249         HadLocationArgList(false) {}
6250 
6251   DbgValueInst *DVI;
6252   DIExpression *Expr;
6253   bool HadLocationArgList;
6254   SmallVector<WeakVH, 2> LocationOps;
6255   SmallVector<const llvm::SCEV *, 2> SCEVs;
6256   SmallVector<std::unique_ptr<SCEVDbgValueBuilder>, 2> RecoveryExprs;
6257 
6258   void clear() {
6259     for (auto &RE : RecoveryExprs)
6260       RE.reset();
6261     RecoveryExprs.clear();
6262   }
6263 
6264   ~DVIRecoveryRec() { clear(); }
6265 };
6266 } // namespace
6267 
6268 /// Returns the total number of DW_OP_llvm_arg operands in the expression.
6269 /// This helps in determining if a DIArglist is necessary or can be omitted from
6270 /// the dbg.value.
6271 static unsigned numLLVMArgOps(SmallVectorImpl<uint64_t> &Expr) {
6272   auto expr_ops = ToDwarfOpIter(Expr);
6273   unsigned Count = 0;
6274   for (auto Op : expr_ops)
6275     if (Op.getOp() == dwarf::DW_OP_LLVM_arg)
6276       Count++;
6277   return Count;
6278 }
6279 
6280 /// Overwrites DVI with the location and Ops as the DIExpression. This will
6281 /// create an invalid expression if Ops has any dwarf::DW_OP_llvm_arg operands,
6282 /// because a DIArglist is not created for the first argument of the dbg.value.
6283 static void updateDVIWithLocation(DbgValueInst &DVI, Value *Location,
6284                                   SmallVectorImpl<uint64_t> &Ops) {
6285   assert(
6286       numLLVMArgOps(Ops) == 0 &&
6287       "Expected expression that does not contain any DW_OP_llvm_arg operands.");
6288   DVI.setRawLocation(ValueAsMetadata::get(Location));
6289   DVI.setExpression(DIExpression::get(DVI.getContext(), Ops));
6290 }
6291 
6292 /// Overwrite DVI with locations placed into a DIArglist.
6293 static void updateDVIWithLocations(DbgValueInst &DVI,
6294                                    SmallVectorImpl<Value *> &Locations,
6295                                    SmallVectorImpl<uint64_t> &Ops) {
6296   assert(numLLVMArgOps(Ops) != 0 &&
6297          "Expected expression that references DIArglist locations using "
6298          "DW_OP_llvm_arg operands.");
6299   SmallVector<ValueAsMetadata *, 3> MetadataLocs;
6300   for (Value *V : Locations)
6301     MetadataLocs.push_back(ValueAsMetadata::get(V));
6302   auto ValArrayRef = llvm::ArrayRef<llvm::ValueAsMetadata *>(MetadataLocs);
6303   DVI.setRawLocation(llvm::DIArgList::get(DVI.getContext(), ValArrayRef));
6304   DVI.setExpression(DIExpression::get(DVI.getContext(), Ops));
6305 }
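
// For illustration, the two dbg.value shapes produced by the helpers above
// are, roughly,
//
//   call void @llvm.dbg.value(metadata i64 %v, metadata !var,
//                             metadata !DIExpression(...))
//   call void @llvm.dbg.value(metadata !DIArgList(i64 %v, i64 %w),
//                             metadata !var, metadata !DIExpression(...))
//
// where only the second form's DIExpression refers to its locations with
// DW_OP_LLVM_arg indices.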
6306 
6307 /// Write the new expression and new location ops for the dbg.value. If
6308 /// possible, reduce the size of the dbg.value intrinsic by omitting the
6309 /// DIArglist, which can be done if:
6310 /// 1. There is only a single location, referenced by a single DW_OP_llvm_arg.
6311 /// 2. The DW_OP_LLVM_arg is the first operand in the expression.
6312 static void UpdateDbgValueInst(DVIRecoveryRec &DVIRec,
6313                                SmallVectorImpl<Value *> &NewLocationOps,
6314                                SmallVectorImpl<uint64_t> &NewExpr) {
6315   unsigned NumLLVMArgs = numLLVMArgOps(NewExpr);
6316   if (NumLLVMArgs == 0) {
6317     // Location assumed to be on the stack.
6318     updateDVIWithLocation(*DVIRec.DVI, NewLocationOps[0], NewExpr);
6319   } else if (NumLLVMArgs == 1 && NewExpr[0] == dwarf::DW_OP_LLVM_arg) {
6320     // There is only a single DW_OP_llvm_arg at the start of the expression,
6321     // so it can be omitted along with DIArglist.
6322     assert(NewExpr[1] == 0 &&
6323            "Lone LLVM_arg in a DIExpression should refer to location-op 0.");
6324     llvm::SmallVector<uint64_t, 6> ShortenedOps(llvm::drop_begin(NewExpr, 2));
6325     updateDVIWithLocation(*DVIRec.DVI, NewLocationOps[0], ShortenedOps);
6326   } else {
6327     // Multiple DW_OP_llvm_arg, so DIArgList is strictly necessary.
6328     updateDVIWithLocations(*DVIRec.DVI, NewLocationOps, NewExpr);
6329   }
6330 
6331   // If the DIExpression was previously empty then add the stack terminator.
6332   // Non-empty expressions have only had elements inserted into them and so the
6333   // terminator should already be present e.g. stack_value or fragment.
6334   DIExpression *SalvageExpr = DVIRec.DVI->getExpression();
6335   if (!DVIRec.Expr->isComplex() && SalvageExpr->isComplex()) {
6336     SalvageExpr = DIExpression::append(SalvageExpr, {dwarf::DW_OP_stack_value});
6337     DVIRec.DVI->setExpression(SalvageExpr);
6338   }
6339 }
6340 
6341 /// Cached location ops may be erased during LSR, in which case an undef is
6342 /// required when restoring from the cache. The type of that location is no
6343 /// longer available, so just use int8. The undef will be replaced by one or
6344 /// more locations later when a SCEVDbgValueBuilder selects alternative
6345 /// locations to use for the salvage.
6346 static Value *getValueOrUndef(WeakVH &VH, LLVMContext &C) {
6347   return (VH) ? VH : UndefValue::get(llvm::Type::getInt8Ty(C));
6348 }
6349 
6350 /// Restore the DVI's pre-LSR arguments. Substitute undef for any erased values.
6351 static void restorePreTransformState(DVIRecoveryRec &DVIRec) {
6352   LLVM_DEBUG(dbgs() << "scev-salvage: restore dbg.value to pre-LSR state\n"
6353                     << "scev-salvage: post-LSR: " << *DVIRec.DVI << '\n');
6354   assert(DVIRec.Expr && "Expected an expression");
6355   DVIRec.DVI->setExpression(DVIRec.Expr);
6356 
6357   // Even a single location-op may be inside a DIArgList and referenced with
6358   // DW_OP_LLVM_arg, which is valid only with a DIArgList.
6359   if (!DVIRec.HadLocationArgList) {
6360     assert(DVIRec.LocationOps.size() == 1 &&
6361            "Unexpected number of location ops.");
6362     // LSR's unsuccessful salvage attempt may have added DIArgList, which in
6363     // this case was not present before, so force the location back to a single
6364     // uncontained Value.
6365     Value *CachedValue =
6366         getValueOrUndef(DVIRec.LocationOps[0], DVIRec.DVI->getContext());
6367     DVIRec.DVI->setRawLocation(ValueAsMetadata::get(CachedValue));
6368   } else {
6369     SmallVector<ValueAsMetadata *, 3> MetadataLocs;
6370     for (WeakVH VH : DVIRec.LocationOps) {
6371       Value *CachedValue = getValueOrUndef(VH, DVIRec.DVI->getContext());
6372       MetadataLocs.push_back(ValueAsMetadata::get(CachedValue));
6373     }
6374     auto ValArrayRef = llvm::ArrayRef<llvm::ValueAsMetadata *>(MetadataLocs);
6375     DVIRec.DVI->setRawLocation(
6376         llvm::DIArgList::get(DVIRec.DVI->getContext(), ValArrayRef));
6377   }
6378   LLVM_DEBUG(dbgs() << "scev-salvage: pre-LSR: " << *DVIRec.DVI << '\n');
6379 }
6380 
6381 static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE,
6382                        llvm::PHINode *LSRInductionVar, DVIRecoveryRec &DVIRec,
6383                        const SCEV *SCEVInductionVar,
6384                        SCEVDbgValueBuilder IterCountExpr) {
6385   if (!DVIRec.DVI->isKillLocation())
6386     return false;
6387 
6388   // LSR may have caused several changes to the dbg.value in the failed salvage
6389   // attempt. So restore the DIExpression, the location ops and also the
6390   // location ops format, which is always DIArglist for multiple ops, but only
6391   // sometimes for a single op.
6392   restorePreTransformState(DVIRec);
6393 
6394   // LocationOpIndexMap[i] will store the post-LSR location index of
6395   // the non-optimised out location at pre-LSR index i.
6396   SmallVector<int64_t, 2> LocationOpIndexMap;
6397   LocationOpIndexMap.assign(DVIRec.LocationOps.size(), -1);
6398   SmallVector<Value *, 2> NewLocationOps;
6399   NewLocationOps.push_back(LSRInductionVar);
6400 
6401   for (unsigned i = 0; i < DVIRec.LocationOps.size(); i++) {
6402     WeakVH VH = DVIRec.LocationOps[i];
6403     // Place the locations not optimised out in the list first, avoiding
6404     // inserts later. The map is used to update the DIExpression's
6405     // DW_OP_LLVM_arg arguments as the expression is updated.
6406     if (VH && !isa<UndefValue>(VH)) {
6407       NewLocationOps.push_back(VH);
6408       LocationOpIndexMap[i] = NewLocationOps.size() - 1;
6409       LLVM_DEBUG(dbgs() << "scev-salvage: Location index " << i
6410                         << " now at index " << LocationOpIndexMap[i] << "\n");
6411       continue;
6412     }
6413 
6414     // It's possible that a value referred to in the SCEV may have been
6415     // optimised out by LSR.
6416     if (SE.containsErasedValue(DVIRec.SCEVs[i]) ||
6417         SE.containsUndefs(DVIRec.SCEVs[i])) {
6418       LLVM_DEBUG(dbgs() << "scev-salvage: SCEV for location at index: " << i
6419                         << " refers to a location that is now undef or erased. "
6420                            "Salvage abandoned.\n");
6421       return false;
6422     }
6423 
6424     LLVM_DEBUG(dbgs() << "scev-salvage: salvaging location at index " << i
6425                       << " with SCEV: " << *DVIRec.SCEVs[i] << "\n");
6426 
6427     DVIRec.RecoveryExprs[i] = std::make_unique<SCEVDbgValueBuilder>();
6428     SCEVDbgValueBuilder *SalvageExpr = DVIRec.RecoveryExprs[i].get();
6429 
6430     // Create an offset-based salvage expression if possible, as it requires
6431     // less DWARF ops than an iteration count-based expression.
6432     if (std::optional<APInt> Offset =
6433             SE.computeConstantDifference(DVIRec.SCEVs[i], SCEVInductionVar)) {
6434       if (Offset->getMinSignedBits() <= 64)
6435         SalvageExpr->createOffsetExpr(Offset->getSExtValue(), LSRInductionVar);
6436     } else if (!SalvageExpr->createIterCountExpr(DVIRec.SCEVs[i], IterCountExpr,
6437                                                  SE))
6438       return false;
6439   }
6440 
6441   // Merge the DbgValueBuilder generated expressions and the original
6442   // DIExpression, placing the result into a new vector.
6443   SmallVector<uint64_t, 3> NewExpr;
6444   if (DVIRec.Expr->getNumElements() == 0) {
6445     assert(DVIRec.RecoveryExprs.size() == 1 &&
6446            "Expected only a single recovery expression for an empty "
6447            "DIExpression.");
6448     assert(DVIRec.RecoveryExprs[0] &&
6449            "Expected a SCEVDbgSalvageBuilder for location 0");
6450     SCEVDbgValueBuilder *B = DVIRec.RecoveryExprs[0].get();
6451     B->appendToVectors(NewExpr, NewLocationOps);
6452   }
6453   for (const auto &Op : DVIRec.Expr->expr_ops()) {
6454     // Most Ops needn't be updated.
6455     if (Op.getOp() != dwarf::DW_OP_LLVM_arg) {
6456       Op.appendToVector(NewExpr);
6457       continue;
6458     }
6459 
6460     uint64_t LocationArgIndex = Op.getArg(0);
6461     SCEVDbgValueBuilder *DbgBuilder =
6462         DVIRec.RecoveryExprs[LocationArgIndex].get();
6463     // The location doesn't have a SCEVDbgValueBuilder, so LSR did not
6464     // optimise it away. So just translate the argument to the updated
6465     // location index.
6466     if (!DbgBuilder) {
6467       NewExpr.push_back(dwarf::DW_OP_LLVM_arg);
6468       assert(LocationOpIndexMap[Op.getArg(0)] != -1 &&
6469              "Expected a non-negative index for the location-op position.");
6470       NewExpr.push_back(LocationOpIndexMap[Op.getArg(0)]);
6471       continue;
6472     }
6473     // The location has a recovery expression.
6474     DbgBuilder->appendToVectors(NewExpr, NewLocationOps);
6475   }
6476 
6477   UpdateDbgValueInst(DVIRec, NewLocationOps, NewExpr);
6478   LLVM_DEBUG(dbgs() << "scev-salvage: Updated DVI: " << *DVIRec.DVI << "\n");
6479   return true;
6480 }
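
// A hedged end-to-end sketch (hypothetical IR and metadata): a dbg.value left
// as a kill location after LSR, say for a value whose pre-LSR SCEV differs
// from the surviving induction variable %iv by a constant 8, can be rewritten
// by the offset path above as, roughly,
//
//   call void @llvm.dbg.value(metadata i64 %iv, metadata !var,
//       metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value))
//
// so the variable's value is again recoverable from a live location.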
6481 
6482 /// Obtain an expression for the iteration count, then attempt to salvage the
6483 /// dbg.value intrinsics.
6484 static void
6485 DbgRewriteSalvageableDVIs(llvm::Loop *L, ScalarEvolution &SE,
6486                           llvm::PHINode *LSRInductionVar,
6487                           SmallVector<std::unique_ptr<DVIRecoveryRec>, 2> &DVIToUpdate) {
6488   if (DVIToUpdate.empty())
6489     return;
6490 
6491   const llvm::SCEV *SCEVInductionVar = SE.getSCEV(LSRInductionVar);
6492   assert(SCEVInductionVar &&
6493          "Anticipated a SCEV for the post-LSR induction variable");
6494 
6495   if (const SCEVAddRecExpr *IVAddRec =
6496           dyn_cast<SCEVAddRecExpr>(SCEVInductionVar)) {
6497     if (!IVAddRec->isAffine())
6498       return;
6499 
6500     // Prevent translation using excessive resources.
6501     if (IVAddRec->getExpressionSize() > MaxSCEVSalvageExpressionSize)
6502       return;
6503 
6504     // The iteration count is required to recover location values.
6505     SCEVDbgValueBuilder IterCountExpr;
6506     IterCountExpr.pushLocation(LSRInductionVar);
6507     if (!IterCountExpr.SCEVToIterCountExpr(*IVAddRec, SE))
6508       return;
6509 
6510     LLVM_DEBUG(dbgs() << "scev-salvage: IV SCEV: " << *SCEVInductionVar
6511                       << '\n');
6512 
6513     for (auto &DVIRec : DVIToUpdate) {
6514       SalvageDVI(L, SE, LSRInductionVar, *DVIRec, SCEVInductionVar,
6515                  IterCountExpr);
6516     }
6517   }
6518 }
6519 
6520 /// Identify and cache salvageable DVI locations and expressions along with the
6521 /// corresponding SCEV(s). Also ensure that the DVI is not deleted between
6522 /// caching and salvaging.
6523 static void DbgGatherSalvagableDVI(
6524     Loop *L, ScalarEvolution &SE,
6525     SmallVector<std::unique_ptr<DVIRecoveryRec>, 2> &SalvageableDVISCEVs,
6526     SmallSet<AssertingVH<DbgValueInst>, 2> &DVIHandles) {
6527   for (const auto &B : L->getBlocks()) {
6528     for (auto &I : *B) {
6529       auto DVI = dyn_cast<DbgValueInst>(&I);
6530       if (!DVI)
6531         continue;
6532       // Ensure that the dbg.value is not cached if any of its location
6533       // ops is undef.
6534       if (DVI->isKillLocation())
6535         continue;
6536 
6537       // Check that the location op SCEVs are suitable for translation to
6538       // DIExpression.
6539       const auto &HasTranslatableLocationOps =
6540           [&](const DbgValueInst *DVI) -> bool {
6541         for (const auto LocOp : DVI->location_ops()) {
6542           if (!LocOp)
6543             return false;
6544 
6545           if (!SE.isSCEVable(LocOp->getType()))
6546             return false;
6547 
6548           const SCEV *S = SE.getSCEV(LocOp);
6549           if (SE.containsUndefs(S))
6550             return false;
6551         }
6552         return true;
6553       };
6554 
6555       if (!HasTranslatableLocationOps(DVI))
6556         continue;
6557 
6558       std::unique_ptr<DVIRecoveryRec> NewRec =
6559           std::make_unique<DVIRecoveryRec>(DVI);
6560       // Each location Op may need a SCEVDbgValueBuilder in order to recover it.
6561       // Pre-allocating a vector will enable quick lookups of the builder later
6562       // during the salvage.
6563       NewRec->RecoveryExprs.resize(DVI->getNumVariableLocationOps());
6564       for (const auto LocOp : DVI->location_ops()) {
6565         NewRec->SCEVs.push_back(SE.getSCEV(LocOp));
6566         NewRec->LocationOps.push_back(LocOp);
6567         NewRec->HadLocationArgList = DVI->hasArgList();
6568       }
6569       SalvageableDVISCEVs.push_back(std::move(NewRec));
6570       DVIHandles.insert(DVI);
6571     }
6572   }
6573 }
6574 
6575 /// Ideally pick the PHI IV inserted by ScalarEvolutionExpander. As a fallback,
6576 /// any PHI from the loop header is usable, but may have less chance of
6577 /// surviving subsequent transforms.
6578 static llvm::PHINode *GetInductionVariable(const Loop &L, ScalarEvolution &SE,
6579                                            const LSRInstance &LSR) {
6580 
6581   auto IsSuitableIV = [&](PHINode *P) {
6582     if (!SE.isSCEVable(P->getType()))
6583       return false;
6584     if (const SCEVAddRecExpr *Rec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(P)))
6585       return Rec->isAffine() && !SE.containsUndefs(SE.getSCEV(P));
6586     return false;
6587   };
6588 
6589   // For now, just pick the first IV that was generated and inserted by
6590   // ScalarEvolution. Ideally pick an IV that is unlikely to be optimised away
6591   // by subsequent transforms.
6592   for (const WeakVH &IV : LSR.getScalarEvolutionIVs()) {
6593     if (!IV)
6594       continue;
6595 
6596     // There should only be PHI node IVs.
6597     PHINode *P = cast<PHINode>(&*IV);
6598 
6599     if (IsSuitableIV(P))
6600       return P;
6601   }
6602 
6603   for (PHINode &P : L.getHeader()->phis()) {
6604     if (IsSuitableIV(&P))
6605       return &P;
6606   }
6607   return nullptr;
6608 }
6609 
6610 static std::optional<std::tuple<PHINode *, PHINode *, const SCEV *>>
6611 canFoldTermCondOfLoop(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
6612                       const LoopInfo &LI) {
6613   if (!L->isInnermost()) {
6614     LLVM_DEBUG(dbgs() << "Cannot fold on non-innermost loop\n");
6615     return std::nullopt;
6616   }
6617   // Only inspect loops in loop-simplify form.
6618   if (!L->isLoopSimplifyForm()) {
6619     LLVM_DEBUG(dbgs() << "Cannot fold on non-simple loop\n");
6620     return std::nullopt;
6621   }
6622 
6623   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
6624     LLVM_DEBUG(dbgs() << "Cannot fold on backedge that is loop variant\n");
6625     return std::nullopt;
6626   }
6627 
6628   BasicBlock *LoopLatch = L->getLoopLatch();
6629 
6630   // TODO: Can we do something for greater than and less than?
6631   // The terminating condition is foldable only when it is an eq/ne icmp.
6632   BranchInst *BI = cast<BranchInst>(LoopLatch->getTerminator());
6633   if (BI->isUnconditional())
6634     return std::nullopt;
6635   Value *TermCond = BI->getCondition();
6636   if (!isa<ICmpInst>(TermCond) || !cast<ICmpInst>(TermCond)->isEquality()) {
6637     LLVM_DEBUG(dbgs() << "Cannot fold on branching condition that is not an "
6638                          "ICmpInst::eq / ICmpInst::ne\n");
6639     return std::nullopt;
6640   }
6641   if (!TermCond->hasOneUse()) {
6642     LLVM_DEBUG(
6643         dbgs()
6644         << "Cannot replace terminating condition with more than one use\n");
6645     return std::nullopt;
6646   }
6647 
6648   // For `IsToFold`, a primary IV can be replaced by another affine AddRec when
6649   // it is only used by the terminating condition. To check for this, we may
6650   // need to traverse a use-def chain until we can examine the final
6651   // usage.
6652   //         *----------------------*
6653   //   *---->|  LoopHeader:         |
6654   //   |     |  PrimaryIV = phi ... |
6655   //   |     *----------------------*
6656   //   |              |
6657   //   |              |
6658   //   |           chain of
6659   //   |          single use
6660   // used by          |
6661   //  phi             |
6662   //   |            Value
6663   //   |          /       \
6664   //   |     chain of     chain of
6665   //   |    single use     single use
6666   //   |      /               \
6667   //   |     /                 \
6668   //   *- Value                Value --> used by terminating condition
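  //
  // A minimal sketch of a foldable case (illustrative IR, hypothetical names):
  //
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add i64 %iv, 1          ; chain of single use from %iv
  //   %cond = icmp eq i64 %iv.next, %n   ; used by terminating condition
  //   br i1 %cond, label %exit, label %header
  //
  // Here %iv.next is used both by the phi (back-edge) and by the terminating
  // condition, matching the two chains in the diagram above.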
6669   auto IsToFold = [&](PHINode &PN) -> bool {
6670     Value *V = &PN;
6671 
6672     while (V->getNumUses() == 1)
6673       V = *V->user_begin();
6674 
6675     if (V->getNumUses() != 2)
6676       return false;
6677 
6678     Value *VToPN = nullptr;
6679     Value *VToTermCond = nullptr;
6680     for (User *U : V->users()) {
6681       while (U->getNumUses() == 1) {
6682         if (isa<PHINode>(U))
6683           VToPN = U;
6684         if (U == TermCond)
6685           VToTermCond = U;
6686         U = *U->user_begin();
6687       }
6688     }
6689     return VToPN && VToTermCond;
6690   };
6691 
6692   // If this is an IV with which we could replace the terminating condition,
6693   // return the final value of the alternative IV on the last iteration.
6694   auto getAlternateIVEnd = [&](PHINode &PN) -> const SCEV * {
6695     // FIXME: This does not properly account for overflow.
6696     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
6697     const SCEV *BECount = SE.getBackedgeTakenCount(L);
6698     const SCEV *TermValueS = SE.getAddExpr(
6699         AddRec->getOperand(0),
6700         SE.getTruncateOrZeroExtend(
6701             SE.getMulExpr(
6702                 AddRec->getOperand(1),
6703                 SE.getTruncateOrZeroExtend(
6704                     SE.getAddExpr(BECount, SE.getOne(BECount->getType())),
6705                     AddRec->getOperand(1)->getType())),
6706             AddRec->getOperand(0)->getType()));
6707     const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
6708     SCEVExpander Expander(SE, DL, "lsr_fold_term_cond");
6709     if (!Expander.isSafeToExpand(TermValueS)) {
6710       LLVM_DEBUG(
6711           dbgs() << "Not safe to expand terminating value for phi node" << PN
6712                  << "\n");
6713       return nullptr;
6714     }
6715     return TermValueS;
6716   };
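
  // In SCEV terms, getAlternateIVEnd computes (truncation/extension elided):
  //
  //   TermValue = Start + Step * (BECount + 1)
  //
  // e.g. for a hypothetical pointer IV {%base,+,4}<%loop> with a backedge-taken
  // count of %n - 1, the end value would be %base + 4 * %n.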
6717 
6718   PHINode *ToFold = nullptr;
6719   PHINode *ToHelpFold = nullptr;
6720   const SCEV *TermValueS = nullptr;
6721 
6722   for (PHINode &PN : L->getHeader()->phis()) {
6723     if (!SE.isSCEVable(PN.getType())) {
6724       LLVM_DEBUG(dbgs() << "IV of phi '" << PN
6725                         << "' is not SCEV-able, not qualified for the "
6726                            "terminating condition folding.\n");
6727       continue;
6728     }
6729     const SCEV *S = SE.getSCEV(&PN);
6730     const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S);
6731     // Only speculate on affine AddRec
6732     if (!AddRec || !AddRec->isAffine()) {
6733       LLVM_DEBUG(dbgs() << "SCEV of phi '" << PN
6734                         << "' is not an affine add recurrence, not qualified "
6735                            "for the terminating condition folding.\n");
6736       continue;
6737     }
6738 
6739     if (IsToFold(PN))
6740       ToFold = &PN;
6741     else if (auto P = getAlternateIVEnd(PN)) {
6742       ToHelpFold = &PN;
6743       TermValueS = P;
6744     }
6745   }
6746 
6747   LLVM_DEBUG(if (ToFold && !ToHelpFold) dbgs()
6748                  << "Cannot find another AddRec IV to help folding\n";);
6749 
6750   LLVM_DEBUG(if (ToFold && ToHelpFold) dbgs()
6751              << "\nFound loop that can fold terminating condition\n"
6752              << "  BECount (SCEV): " << *SE.getBackedgeTakenCount(L) << "\n"
6753              << "  TermCond: " << *TermCond << "\n"
6754              << "  BranchInst: " << *BI << "\n"
6755              << "  ToFold: " << *ToFold << "\n"
6756              << "  ToHelpFold: " << *ToHelpFold << "\n");
6757 
6758   if (!ToFold || !ToHelpFold)
6759     return std::nullopt;
6760   return std::make_tuple(ToFold, ToHelpFold, TermValueS);
6761 }
6762 
6763 static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
6764                                DominatorTree &DT, LoopInfo &LI,
6765                                const TargetTransformInfo &TTI,
6766                                AssumptionCache &AC, TargetLibraryInfo &TLI,
6767                                MemorySSA *MSSA) {
6768 
6769   // Debug preservation - before we start removing anything, identify which
6770   // DVIs meet the salvageable criteria and store their DIExpression and SCEVs.
6771   SmallVector<std::unique_ptr<DVIRecoveryRec>, 2> SalvageableDVIRecords;
6772   SmallSet<AssertingVH<DbgValueInst>, 2> DVIHandles;
6773   DbgGatherSalvagableDVI(L, SE, SalvageableDVIRecords, DVIHandles);
6774 
6775   bool Changed = false;
6776   std::unique_ptr<MemorySSAUpdater> MSSAU;
6777   if (MSSA)
6778     MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
6779 
6780   // Run the main LSR transformation.
6781   const LSRInstance &Reducer =
6782       LSRInstance(L, IU, SE, DT, LI, TTI, AC, TLI, MSSAU.get());
6783   Changed |= Reducer.getChanged();
6784 
6785   // Remove any extra phis created by processing inner loops.
6786   Changed |= DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
6787   if (EnablePhiElim && L->isLoopSimplifyForm()) {
6788     SmallVector<WeakTrackingVH, 16> DeadInsts;
6789     const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
6790     SCEVExpander Rewriter(SE, DL, "lsr", false);
6791 #ifndef NDEBUG
6792     Rewriter.setDebugType(DEBUG_TYPE);
6793 #endif
6794     unsigned numFolded = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
6795     if (numFolded) {
6796       Changed = true;
6797       RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI,
6798                                                            MSSAU.get());
6799       DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
6800     }
6801   }
6802   // LSR may at times remove all uses of an induction variable from a loop.
6803   // The only remaining use is the PHI in the exit block.
6804   // When this is the case, if the exit value of the IV can be calculated using
6805   // SCEV, we can replace the exit block PHI with the final value of the IV and
6806   // skip the updates in each loop iteration.
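  //
  // A minimal sketch (illustrative IR, hypothetical names): with the only
  // remaining IV use being an LCSSA phi in the exit block,
  //
  //   exit:
  //     %iv.lcssa = phi i64 [ %iv.next, %latch ]
  //
  // if SCEV can compute the exit value (say, %n), the phi is replaced by an
  // expansion of %n and the in-loop IV updates become dead.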
6807   if (L->isRecursivelyLCSSAForm(DT, LI) && L->getExitBlock()) {
6808     SmallVector<WeakTrackingVH, 16> DeadInsts;
6809     const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
6810     SCEVExpander Rewriter(SE, DL, "lsr", true);
6811     int Rewrites = rewriteLoopExitValues(L, &LI, &TLI, &SE, &TTI, Rewriter, &DT,
6812                                          UnusedIndVarInLoop, DeadInsts);
6813     if (Rewrites) {
6814       Changed = true;
6815       RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI,
6816                                                            MSSAU.get());
6817       DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
6818     }
6819   }
6820 
6821   if (AllowTerminatingConditionFoldingAfterLSR) {
6822     if (auto Opt = canFoldTermCondOfLoop(L, SE, DT, LI)) {
6823       auto [ToFold, ToHelpFold, TermValueS] = *Opt;
6824 
6825       Changed = true;
6826       NumTermFold++;
6827 
6828       BasicBlock *LoopPreheader = L->getLoopPreheader();
6829       BasicBlock *LoopLatch = L->getLoopLatch();
6830 
6831       (void)ToFold;
6832       LLVM_DEBUG(dbgs() << "To fold phi-node:\n"
6833                         << *ToFold << "\n"
6834                         << "New term-cond phi-node:\n"
6835                         << *ToHelpFold << "\n");
6836 
6837       Value *StartValue = ToHelpFold->getIncomingValueForBlock(LoopPreheader);
6838       (void)StartValue;
6839       Value *LoopValue = ToHelpFold->getIncomingValueForBlock(LoopLatch);
6840 
6841       // SCEVExpander for use in both the preheader and the latch.
6842       const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
6843       SCEVExpander Expander(SE, DL, "lsr_fold_term_cond");
6844       SCEVExpanderCleaner ExpCleaner(Expander);
6845 
6846       assert(Expander.isSafeToExpand(TermValueS) &&
6847              "Terminating value was checked safe in canFoldTermCondOfLoop");
6848 
6849       // Create the new terminating value in the loop preheader.
6850       Value *TermValue = Expander.expandCodeFor(TermValueS, ToHelpFold->getType(),
6851                                                 LoopPreheader->getTerminator());
6852 
6853       LLVM_DEBUG(dbgs() << "Start value of new term-cond phi-node:\n"
6854                         << *StartValue << "\n"
6855                         << "Terminating value of new term-cond phi-node:\n"
6856                         << *TermValue << "\n");
6857 
6858       // Create new terminating condition at loop latch
6859       BranchInst *BI = cast<BranchInst>(LoopLatch->getTerminator());
6860       ICmpInst *OldTermCond = cast<ICmpInst>(BI->getCondition());
6861       IRBuilder<> LatchBuilder(LoopLatch->getTerminator());
6862       // FIXME: We are adding a use of an IV here without accounting for
6863       // poison safety. This is incorrect.
6864       Value *NewTermCond = LatchBuilder.CreateICmp(
6865           OldTermCond->getPredicate(), LoopValue, TermValue,
6866           "lsr_fold_term_cond.replaced_term_cond");
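
      // Illustratively (hypothetical IR), a counter-based exit such as
      //   %cond = icmp ne i64 %iv.next, %n
      // may become a pointer-based exit such as
      //   %cond = icmp ne ptr %ptr.next, %ptr.end
      // where %ptr.end is the TermValue expanded in the preheader.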
6867 
6868       LLVM_DEBUG(dbgs() << "Old term-cond:\n"
6869                         << *OldTermCond << "\n"
6870                         << "New term-cond:\n" << *NewTermCond << "\n");
6871 
6872       BI->setCondition(NewTermCond);
6873 
6874       OldTermCond->eraseFromParent();
6875       DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
6876 
6877       ExpCleaner.markResultUsed();
6878     }
6879   }
6880 
6881   if (SalvageableDVIRecords.empty())
6882     return Changed;
6883 
6884   // Obtain relevant IVs and attempt to rewrite the salvageable DVIs with
6885   // expressions composed using the derived iteration count.
6886   // TODO: Allow for multiple IV references for nested AddRecSCEVs
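  //
  // For instance (illustrative, hypothetical values), a dbg.value whose saved
  // location-op SCEV is {%base,+,4}<%loop> can be re-expressed against an IV
  // with SCEV {0,+,1}<%loop> as a DIExpression computing %base + 4 * IV,
  // keeping the variable location live after LSR rewrites the original value.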
6887   for (const auto &L : LI) {
6888     if (llvm::PHINode *IV = GetInductionVariable(*L, SE, Reducer))
6889       DbgRewriteSalvageableDVIs(L, SE, IV, SalvageableDVIRecords);
6890     else {
6891       LLVM_DEBUG(dbgs() << "scev-salvage: SCEV salvaging not possible. An IV "
6892                            "could not be identified.\n");
6893     }
6894   }
6895 
6896   for (auto &Rec : SalvageableDVIRecords)
6897     Rec->clear();
6898   SalvageableDVIRecords.clear();
6899   DVIHandles.clear();
6900   return Changed;
6901 }
6902 
6903 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
6904   if (skipLoop(L))
6905     return false;
6906 
6907   auto &IU = getAnalysis<IVUsersWrapperPass>().getIU();
6908   auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
6909   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6910   auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
6911   const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
6912       *L->getHeader()->getParent());
6913   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
6914       *L->getHeader()->getParent());
6915   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
6916       *L->getHeader()->getParent());
6917   auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
6918   MemorySSA *MSSA = nullptr;
6919   if (MSSAAnalysis)
6920     MSSA = &MSSAAnalysis->getMSSA();
6921   return ReduceLoopStrength(L, IU, SE, DT, LI, TTI, AC, TLI, MSSA);
6922 }
6923 
6924 PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM,
6925                                               LoopStandardAnalysisResults &AR,
6926                                               LPMUpdater &) {
6927   if (!ReduceLoopStrength(&L, AM.getResult<IVUsersAnalysis>(L, AR), AR.SE,
6928                           AR.DT, AR.LI, AR.TTI, AR.AC, AR.TLI, AR.MSSA))
6929     return PreservedAnalyses::all();
6930 
6931   auto PA = getLoopPassPreservedAnalyses();
6932   if (AR.MSSA)
6933     PA.preserve<MemorySSAAnalysis>();
6934   return PA;
6935 }
6936 
6937 char LoopStrengthReduce::ID = 0;
6938 
6939 INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
6940                       "Loop Strength Reduction", false, false)
6941 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6942 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6943 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6944 INITIALIZE_PASS_DEPENDENCY(IVUsersWrapperPass)
6945 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6946 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
6947 INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
6948                     "Loop Strength Reduction", false, false)
6949 
6950 Pass *llvm::createLoopStrengthReducePass() { return new LoopStrengthReduce(); }
6951