xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp (revision 59c8e88e72633afbc47a4ace0d2170d00d51f7dc)
1 //===-- SimplifyIndVar.cpp - Induction variable simplification ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements induction variable simplification. It does
10 // not define any actual pass or policy, but provides a single function to
11 // simplify a loop's induction variables based on ScalarEvolution.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/SimplifyIndVar.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/LoopInfo.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/IRBuilder.h"
21 #include "llvm/IR/Instructions.h"
22 #include "llvm/IR/IntrinsicInst.h"
23 #include "llvm/IR/PatternMatch.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/raw_ostream.h"
26 #include "llvm/Transforms/Utils/Local.h"
27 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
28 
using namespace llvm;

#define DEBUG_TYPE "indvars"

// Pass statistics: counters bumped by the simplifications below and reported
// under -stats with the "indvars" debug type.
STATISTIC(NumElimIdentity, "Number of IV identities eliminated");
STATISTIC(NumElimOperand,  "Number of IV operands folded into a use");
STATISTIC(NumFoldedUser, "Number of IV users folded into a constant");
STATISTIC(NumElimRem     , "Number of IV remainder operations eliminated");
STATISTIC(
    NumSimplifiedSDiv,
    "Number of IV signed division operations converted to unsigned division");
STATISTIC(
    NumSimplifiedSRem,
    "Number of IV signed remainder operations converted to unsigned remainder");
STATISTIC(NumElimCmp     , "Number of IV comparisons eliminated");
44 
namespace {
  /// This is a utility for simplifying induction variables
  /// based on ScalarEvolution. It is the primary instrument of the
  /// IndvarSimplify pass, but it may also be directly invoked to cleanup after
  /// other loop passes that preserve SCEV.
  class SimplifyIndvar {
    Loop             *L;  // Loop whose induction variables are simplified.
    LoopInfo         *LI; // Required; asserted non-null in the constructor.
    ScalarEvolution  *SE; // Used to prove facts about IV expressions.
    DominatorTree    *DT; // May be null; guarded where it is dereferenced.
    const TargetTransformInfo *TTI; // Cost model for SCEV expansion.
    SCEVExpander     &Rewriter;     // Materializes SCEVs back into IR.
    SmallVectorImpl<WeakTrackingVH> &DeadInsts; // Receives instructions made dead.

    // Sticky flag: set whenever any simplification modifies the IR.
    bool Changed = false;

  public:
    SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
                   LoopInfo *LI, const TargetTransformInfo *TTI,
                   SCEVExpander &Rewriter,
                   SmallVectorImpl<WeakTrackingVH> &Dead)
        : L(Loop), LI(LI), SE(SE), DT(DT), TTI(TTI), Rewriter(Rewriter),
          DeadInsts(Dead) {
      assert(LI && "IV simplification requires LoopInfo");
    }

    /// Whether any call so far has modified the IR.
    bool hasChanged() const { return Changed; }

    /// Iteratively perform simplification on a worklist of users of the
    /// specified induction variable. This is the top-level driver that applies
    /// all simplifications to users of an IV.
    void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);

    /// Fold IVOperand into UseInst; returns the exposed operand or null.
    Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);

    bool eliminateIdentitySCEV(Instruction *UseInst, Instruction *IVOperand);
    bool replaceIVUserWithLoopInvariant(Instruction *UseInst);
    bool replaceFloatIVWithIntegerIV(Instruction *UseInst);

    bool eliminateOverflowIntrinsic(WithOverflowInst *WO);
    bool eliminateSaturatingIntrinsic(SaturatingInst *SI);
    bool eliminateTrunc(TruncInst *TI);
    bool eliminateIVUser(Instruction *UseInst, Instruction *IVOperand);
    bool makeIVComparisonInvariant(ICmpInst *ICmp, Instruction *IVOperand);
    void eliminateIVComparison(ICmpInst *ICmp, Instruction *IVOperand);
    void simplifyIVRemainder(BinaryOperator *Rem, Instruction *IVOperand,
                             bool IsSigned);
    void replaceRemWithNumerator(BinaryOperator *Rem);
    void replaceRemWithNumeratorOrZero(BinaryOperator *Rem);
    void replaceSRemWithURem(BinaryOperator *Rem);
    bool eliminateSDiv(BinaryOperator *SDiv);
    bool strengthenBinaryOp(BinaryOperator *BO, Instruction *IVOperand);
    bool strengthenOverflowingOperation(BinaryOperator *OBO,
                                        Instruction *IVOperand);
    bool strengthenRightShift(BinaryOperator *BO, Instruction *IVOperand);
  };
}
102 
103 /// Find a point in code which dominates all given instructions. We can safely
104 /// assume that, whatever fact we can prove at the found point, this fact is
105 /// also true for each of the given instructions.
106 static Instruction *findCommonDominator(ArrayRef<Instruction *> Instructions,
107                                         DominatorTree &DT) {
108   Instruction *CommonDom = nullptr;
109   for (auto *Insn : Instructions)
110     CommonDom =
111         CommonDom ? DT.findNearestCommonDominator(CommonDom, Insn) : Insn;
112   assert(CommonDom && "Common dominator not found?");
113   return CommonDom;
114 }
115 
/// Fold an IV operand into its use.  This removes increments of an
/// aligned IV when used by a instruction that ignores the low bits.
///
/// IVOperand is guaranteed SCEVable, but UseInst may not be.
///
/// Return the operand of IVOperand for this induction variable if IVOperand can
/// be folded (in case more folding opportunities have been exposed).
/// Otherwise return null.
Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
  Value *IVSrc = nullptr;
  const unsigned OperIdx = 0;
  const SCEV *FoldedExpr = nullptr;
  bool MustDropExactFlag = false;
  switch (UseInst->getOpcode()) {
  default:
    return nullptr;
  case Instruction::UDiv:
  case Instruction::LShr:
    // We're only interested in the case where we know something about
    // the numerator and have a constant denominator.
    if (IVOperand != UseInst->getOperand(OperIdx) ||
        !isa<ConstantInt>(UseInst->getOperand(1)))
      return nullptr;

    // Attempt to fold a binary operator with constant operand.
    // e.g. ((I + 1) >> 2) => I >> 2
    if (!isa<BinaryOperator>(IVOperand)
        || !isa<ConstantInt>(IVOperand->getOperand(1)))
      return nullptr;

    IVSrc = IVOperand->getOperand(0);
    // IVSrc must be the (SCEVable) IV, since the other operand is const.
    assert(SE->isSCEVable(IVSrc->getType()) && "Expect SCEVable IV operand");

    ConstantInt *D = cast<ConstantInt>(UseInst->getOperand(1));
    if (UseInst->getOpcode() == Instruction::LShr) {
      // Get a constant for the divisor. See createSCEV.
      // A shift by C is modeled as a udiv by 2^C; reject shifts of the whole
      // bit width, which would not correspond to any power of two.
      uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
      if (D->getValue().uge(BitWidth))
        return nullptr;

      D = ConstantInt::get(UseInst->getContext(),
                           APInt::getOneBitSet(BitWidth, D->getZExtValue()));
    }
    const auto *LHS = SE->getSCEV(IVSrc);
    const auto *RHS = SE->getSCEV(D);
    FoldedExpr = SE->getUDivExpr(LHS, RHS);
    // We might have 'exact' flag set at this point which will no longer be
    // correct after we make the replacement.
    if (UseInst->isExact() && LHS != SE->getMulExpr(FoldedExpr, RHS))
      MustDropExactFlag = true;
  }
  // We have something that might fold its operand. Compare SCEVs.
  if (!SE->isSCEVable(UseInst->getType()))
    return nullptr;

  // Bypass the operand if SCEV can prove it has no effect.
  if (SE->getSCEV(UseInst) != FoldedExpr)
    return nullptr;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
                    << " -> " << *UseInst << '\n');

  // Rewrite the use to consume the IV source directly; SCEV proved this is a
  // no-op at the SCEV level.
  UseInst->setOperand(OperIdx, IVSrc);
  assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper");

  if (MustDropExactFlag)
    UseInst->dropPoisonGeneratingFlags();

  ++NumElimOperand;
  Changed = true;
  if (IVOperand->use_empty())
    DeadInsts.emplace_back(IVOperand);
  return IVSrc;
}
191 
/// Try to rewrite an IV comparison so that both operands are loop invariant,
/// expanding the invariant operands in the preheader. Returns true on success.
bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp,
                                               Instruction *IVOperand) {
  // The invariant operands are expanded at the preheader terminator, so one
  // must exist.
  auto *Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);
  // Ask SCEV for an equivalent predicate over loop-invariant expressions.
  auto LIP = SE->getLoopInvariantPredicate(Pred, S, X, L, ICmp);
  if (!LIP)
    return false;
  ICmpInst::Predicate InvariantPredicate = LIP->Pred;
  const SCEV *InvariantLHS = LIP->LHS;
  const SCEV *InvariantRHS = LIP->RHS;

  // Do not generate something ridiculous.
  auto *PHTerm = Preheader->getTerminator();
  if (Rewriter.isHighCostExpansion({InvariantLHS, InvariantRHS}, L,
                                   2 * SCEVCheapExpansionBudget, TTI, PHTerm) ||
      !Rewriter.isSafeToExpandAt(InvariantLHS, PHTerm) ||
      !Rewriter.isSafeToExpandAt(InvariantRHS, PHTerm))
    return false;
  auto *NewLHS =
      Rewriter.expandCodeFor(InvariantLHS, IVOperand->getType(), PHTerm);
  auto *NewRHS =
      Rewriter.expandCodeFor(InvariantRHS, IVOperand->getType(), PHTerm);
  LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
  // Rewrite the comparison in place to use the invariant operands.
  ICmp->setPredicate(InvariantPredicate);
  ICmp->setOperand(0, NewLHS);
  ICmp->setOperand(1, NewRHS);
  return true;
}
235 
/// SimplifyIVUsers helper for eliminating useless
/// comparisons against an induction variable.
void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp,
                                           Instruction *IVOperand) {
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  ICmpInst::Predicate OriginalPred = Pred;
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // If the condition is always true or always false in the given context,
  // replace it with a constant value.
  // Evaluate the predicate at a point dominating every user, so the proved
  // fact holds for each of them.
  SmallVector<Instruction *, 4> Users;
  for (auto *U : ICmp->users())
    Users.push_back(cast<Instruction>(U));
  const Instruction *CtxI = findCommonDominator(Users, *DT);
  if (auto Ev = SE->evaluatePredicateAt(Pred, S, X, CtxI)) {
    SE->forgetValue(ICmp);
    ICmp->replaceAllUsesWith(ConstantInt::getBool(ICmp->getContext(), *Ev));
    DeadInsts.emplace_back(ICmp);
    LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
  } else if (makeIVComparisonInvariant(ICmp, IVOperand)) {
    // fallthrough to end of function
  } else if (ICmpInst::isSigned(OriginalPred) &&
             SE->isKnownNonNegative(S) && SE->isKnownNonNegative(X)) {
    // If we were unable to make anything above, all we can do is to
    // canonicalize the comparison hoping that it will open the doors for other
    // optimizations. If we find out that we compare two non-negative values,
    // we turn the instruction's predicate to its unsigned version. Note that
    // we cannot rely on Pred here unless we check if we have swapped it.
    assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
    LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp
                      << '\n');
    ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
  } else
    return;

  ++NumElimCmp;
  Changed = true;
}
286 
287 bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
288   // Get the SCEVs for the ICmp operands.
289   auto *N = SE->getSCEV(SDiv->getOperand(0));
290   auto *D = SE->getSCEV(SDiv->getOperand(1));
291 
292   // Simplify unnecessary loops away.
293   const Loop *L = LI->getLoopFor(SDiv->getParent());
294   N = SE->getSCEVAtScope(N, L);
295   D = SE->getSCEVAtScope(D, L);
296 
297   // Replace sdiv by udiv if both of the operands are non-negative
298   if (SE->isKnownNonNegative(N) && SE->isKnownNonNegative(D)) {
299     auto *UDiv = BinaryOperator::Create(
300         BinaryOperator::UDiv, SDiv->getOperand(0), SDiv->getOperand(1),
301         SDiv->getName() + ".udiv", SDiv);
302     UDiv->setIsExact(SDiv->isExact());
303     SDiv->replaceAllUsesWith(UDiv);
304     LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
305     ++NumSimplifiedSDiv;
306     Changed = true;
307     DeadInsts.push_back(SDiv);
308     return true;
309   }
310 
311   return false;
312 }
313 
314 // i %s n -> i %u n if i >= 0 and n >= 0
315 void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
316   auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
317   auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D,
318                                       Rem->getName() + ".urem", Rem);
319   Rem->replaceAllUsesWith(URem);
320   LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
321   ++NumSimplifiedSRem;
322   Changed = true;
323   DeadInsts.emplace_back(Rem);
324 }
325 
326 // i % n  -->  i  if i is in [0,n).
327 void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) {
328   Rem->replaceAllUsesWith(Rem->getOperand(0));
329   LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
330   ++NumElimRem;
331   Changed = true;
332   DeadInsts.emplace_back(Rem);
333 }
334 
335 // (i+1) % n  -->  (i+1)==n?0:(i+1)  if i is in [0,n).
336 void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) {
337   auto *T = Rem->getType();
338   auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
339   ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ, N, D);
340   SelectInst *Sel =
341       SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem);
342   Rem->replaceAllUsesWith(Sel);
343   LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
344   ++NumElimRem;
345   Changed = true;
346   DeadInsts.emplace_back(Rem);
347 }
348 
/// SimplifyIVUsers helper for eliminating useless remainder operations
/// operating on an induction variable or replacing srem by urem.
void SimplifyIndvar::simplifyIVRemainder(BinaryOperator *Rem,
                                         Instruction *IVOperand,
                                         bool IsSigned) {
  auto *NValue = Rem->getOperand(0);
  auto *DValue = Rem->getOperand(1);
  // We're only interested in the case where we know something about
  // the numerator, unless it is a srem, because we want to replace srem by urem
  // in general.
  bool UsedAsNumerator = IVOperand == NValue;
  if (!UsedAsNumerator && !IsSigned)
    return;

  const SCEV *N = SE->getSCEV(NValue);

  // Simplify unnecessary loops away.
  const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
  N = SE->getSCEVAtScope(N, ICmpLoop);

  // Unsigned numerators are non-negative by definition.
  bool IsNumeratorNonNegative = !IsSigned || SE->isKnownNonNegative(N);

  // Do not proceed if the Numerator may be negative
  if (!IsNumeratorNonNegative)
    return;

  const SCEV *D = SE->getSCEV(DValue);
  D = SE->getSCEVAtScope(D, ICmpLoop);

  if (UsedAsNumerator) {
    // N < D implies N % D == N: drop the remainder entirely.
    auto LT = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    if (SE->isKnownPredicate(LT, N, D)) {
      replaceRemWithNumerator(Rem);
      return;
    }

    // N - 1 < D implies N is in [0, D]: N % D is N, except N == D gives 0.
    auto *T = Rem->getType();
    const auto *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
    if (SE->isKnownPredicate(LT, NLessOne, D)) {
      replaceRemWithNumeratorOrZero(Rem);
      return;
    }
  }

  // Try to replace SRem with URem, if both N and D are known non-negative.
  // Since we have already checked N, we only need to check D now.
  if (!IsSigned || !SE->isKnownNonNegative(D))
    return;

  replaceSRemWithURem(Rem);
}
400 
/// Replace a {add,sub,mul}.with.overflow intrinsic with a plain binary
/// operator when SCEV proves the operation cannot overflow. Returns true if
/// the rewrite happened.
bool SimplifyIndvar::eliminateOverflowIntrinsic(WithOverflowInst *WO) {
  const SCEV *LHS = SE->getSCEV(WO->getLHS());
  const SCEV *RHS = SE->getSCEV(WO->getRHS());
  if (!SE->willNotOverflow(WO->getBinaryOp(), WO->isSigned(), LHS, RHS))
    return false;

  // Proved no overflow, nuke the overflow check and, if possible, the overflow
  // intrinsic as well.

  BinaryOperator *NewResult = BinaryOperator::Create(
      WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), "", WO);

  // Tag the replacement with the no-wrap flag matching the intrinsic's
  // signedness; this is justified by the willNotOverflow proof above.
  if (WO->isSigned())
    NewResult->setHasNoSignedWrap(true);
  else
    NewResult->setHasNoUnsignedWrap(true);

  SmallVector<ExtractValueInst *, 4> ToDelete;

  // Rewrite the extractvalue users: index 1 (the overflow bit) becomes false,
  // index 0 (the result) becomes the new binary operator. Deletion is
  // deferred so the user list is not mutated while being iterated.
  for (auto *U : WO->users()) {
    if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      if (EVI->getIndices()[0] == 1)
        EVI->replaceAllUsesWith(ConstantInt::getFalse(WO->getContext()));
      else {
        assert(EVI->getIndices()[0] == 0 && "Only two possibilities!");
        EVI->replaceAllUsesWith(NewResult);
      }
      ToDelete.push_back(EVI);
    }
  }

  for (auto *EVI : ToDelete)
    EVI->eraseFromParent();

  if (WO->use_empty())
    WO->eraseFromParent();

  Changed = true;
  return true;
}
441 
442 bool SimplifyIndvar::eliminateSaturatingIntrinsic(SaturatingInst *SI) {
443   const SCEV *LHS = SE->getSCEV(SI->getLHS());
444   const SCEV *RHS = SE->getSCEV(SI->getRHS());
445   if (!SE->willNotOverflow(SI->getBinaryOp(), SI->isSigned(), LHS, RHS))
446     return false;
447 
448   BinaryOperator *BO = BinaryOperator::Create(
449       SI->getBinaryOp(), SI->getLHS(), SI->getRHS(), SI->getName(), SI);
450   if (SI->isSigned())
451     BO->setHasNoSignedWrap();
452   else
453     BO->setHasNoUnsignedWrap();
454 
455   SI->replaceAllUsesWith(BO);
456   DeadInsts.emplace_back(SI);
457   Changed = true;
458   return true;
459 }
460 
/// Eliminate a trunc of an IV when every user is a comparison that can be
/// rewritten to compare the full-width IV against a sign/zero-extended
/// invariant. Returns true if the trunc was removed.
bool SimplifyIndvar::eliminateTrunc(TruncInst *TI) {
  // It is always legal to replace
  //   icmp <pred> i32 trunc(iv), n
  // with
  //   icmp <pred> i64 sext(trunc(iv)), sext(n), if pred is signed predicate.
  // Or with
  //   icmp <pred> i64 zext(trunc(iv)), zext(n), if pred is unsigned predicate.
  // Or with either of these if pred is an equality predicate.
  //
  // If we can prove that iv == sext(trunc(iv)) or iv == zext(trunc(iv)) for
  // every comparison which uses trunc, it means that we can replace each of
  // them with comparison of iv against sext/zext(n). We no longer need trunc
  // after that.
  //
  // TODO: Should we do this if we can widen *some* comparisons, but not all
  // of them? Sometimes it is enough to enable other optimizations, but the
  // trunc instruction will stay in the loop.
  Value *IV = TI->getOperand(0);
  Type *IVTy = IV->getType();
  const SCEV *IVSCEV = SE->getSCEV(IV);
  const SCEV *TISCEV = SE->getSCEV(TI);

  // Check if iv == zext(trunc(iv)) and if iv == sext(trunc(iv)). If so, we can
  // get rid of trunc
  bool DoesSExtCollapse = false;
  bool DoesZExtCollapse = false;
  if (IVSCEV == SE->getSignExtendExpr(TISCEV, IVTy))
    DoesSExtCollapse = true;
  if (IVSCEV == SE->getZeroExtendExpr(TISCEV, IVTy))
    DoesZExtCollapse = true;

  // If neither sext nor zext does collapse, it is not profitable to do any
  // transform. Bail.
  if (!DoesSExtCollapse && !DoesZExtCollapse)
    return false;

  // Collect users of the trunc that look like comparisons against invariants.
  // Bail if we find something different.
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (auto *U : TI->users()) {
    // We don't care about users in unreachable blocks.
    if (isa<Instruction>(U) &&
        !DT->isReachableFromEntry(cast<Instruction>(U)->getParent()))
      continue;
    ICmpInst *ICI = dyn_cast<ICmpInst>(U);
    if (!ICI) return false;
    assert(L->contains(ICI->getParent()) && "LCSSA form broken?");
    // Exactly one side must be the trunc; the other must be loop invariant.
    if (!(ICI->getOperand(0) == TI && L->isLoopInvariant(ICI->getOperand(1))) &&
        !(ICI->getOperand(1) == TI && L->isLoopInvariant(ICI->getOperand(0))))
      return false;
    // If we cannot get rid of trunc, bail.
    if (ICI->isSigned() && !DoesSExtCollapse)
      return false;
    if (ICI->isUnsigned() && !DoesZExtCollapse)
      return false;
    // For equality, either signed or unsigned works.
    ICmpUsers.push_back(ICI);
  }

  auto CanUseZExt = [&](ICmpInst *ICI) {
    // Unsigned comparison can be widened as unsigned.
    if (ICI->isUnsigned())
      return true;
    // Is it profitable to do zext?
    if (!DoesZExtCollapse)
      return false;
    // For equality, we can safely zext both parts.
    if (ICI->isEquality())
      return true;
    // Otherwise we can only use zext when comparing two non-negative or two
    // negative values. But in practice, we will never pass DoesZExtCollapse
    // check for a negative value, because zext(trunc(x)) is non-negative. So
    // it only make sense to check for non-negativity here.
    const SCEV *SCEVOP1 = SE->getSCEV(ICI->getOperand(0));
    const SCEV *SCEVOP2 = SE->getSCEV(ICI->getOperand(1));
    return SE->isKnownNonNegative(SCEVOP1) && SE->isKnownNonNegative(SCEVOP2);
  };
  // Replace all comparisons against trunc with comparisons against IV.
  for (auto *ICI : ICmpUsers) {
    bool IsSwapped = L->isLoopInvariant(ICI->getOperand(0));
    auto *Op1 = IsSwapped ? ICI->getOperand(0) : ICI->getOperand(1);
    Instruction *Ext = nullptr;
    // For signed/unsigned predicate, replace the old comparison with comparison
    // of immediate IV against sext/zext of the invariant argument. If we can
    // use either sext or zext (i.e. we are dealing with equality predicate),
    // then prefer zext as a more canonical form.
    // TODO: If we see a signed comparison which can be turned into unsigned,
    // we can do it here for canonicalization purposes.
    ICmpInst::Predicate Pred = ICI->getPredicate();
    if (IsSwapped) Pred = ICmpInst::getSwappedPredicate(Pred);
    if (CanUseZExt(ICI)) {
      assert(DoesZExtCollapse && "Unprofitable zext?");
      Ext = new ZExtInst(Op1, IVTy, "zext", ICI);
      Pred = ICmpInst::getUnsignedPredicate(Pred);
    } else {
      assert(DoesSExtCollapse && "Unprofitable sext?");
      Ext = new SExtInst(Op1, IVTy, "sext", ICI);
      assert(Pred == ICmpInst::getSignedPredicate(Pred) && "Must be signed!");
    }
    // Hoist the extension of the invariant operand out of the loop if legal.
    bool Changed;
    L->makeLoopInvariant(Ext, Changed);
    (void)Changed;
    ICmpInst *NewICI = new ICmpInst(ICI, Pred, IV, Ext);
    ICI->replaceAllUsesWith(NewICI);
    DeadInsts.emplace_back(ICI);
  }

  // Trunc no longer needed.
  TI->replaceAllUsesWith(PoisonValue::get(TI->getType()));
  DeadInsts.emplace_back(TI);
  return true;
}
573 
574 /// Eliminate an operation that consumes a simple IV and has no observable
575 /// side-effect given the range of IV values.  IVOperand is guaranteed SCEVable,
576 /// but UseInst may not be.
577 bool SimplifyIndvar::eliminateIVUser(Instruction *UseInst,
578                                      Instruction *IVOperand) {
579   if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
580     eliminateIVComparison(ICmp, IVOperand);
581     return true;
582   }
583   if (BinaryOperator *Bin = dyn_cast<BinaryOperator>(UseInst)) {
584     bool IsSRem = Bin->getOpcode() == Instruction::SRem;
585     if (IsSRem || Bin->getOpcode() == Instruction::URem) {
586       simplifyIVRemainder(Bin, IVOperand, IsSRem);
587       return true;
588     }
589 
590     if (Bin->getOpcode() == Instruction::SDiv)
591       return eliminateSDiv(Bin);
592   }
593 
594   if (auto *WO = dyn_cast<WithOverflowInst>(UseInst))
595     if (eliminateOverflowIntrinsic(WO))
596       return true;
597 
598   if (auto *SI = dyn_cast<SaturatingInst>(UseInst))
599     if (eliminateSaturatingIntrinsic(SI))
600       return true;
601 
602   if (auto *TI = dyn_cast<TruncInst>(UseInst))
603     if (eliminateTrunc(TI))
604       return true;
605 
606   if (eliminateIdentitySCEV(UseInst, IVOperand))
607     return true;
608 
609   return false;
610 }
611 
612 static Instruction *GetLoopInvariantInsertPosition(Loop *L, Instruction *Hint) {
613   if (auto *BB = L->getLoopPreheader())
614     return BB->getTerminator();
615 
616   return Hint;
617 }
618 
619 /// Replace the UseInst with a loop invariant expression if it is safe.
620 bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) {
621   if (!SE->isSCEVable(I->getType()))
622     return false;
623 
624   // Get the symbolic expression for this instruction.
625   const SCEV *S = SE->getSCEV(I);
626 
627   if (!SE->isLoopInvariant(S, L))
628     return false;
629 
630   // Do not generate something ridiculous even if S is loop invariant.
631   if (Rewriter.isHighCostExpansion(S, L, SCEVCheapExpansionBudget, TTI, I))
632     return false;
633 
634   auto *IP = GetLoopInvariantInsertPosition(L, I);
635 
636   if (!Rewriter.isSafeToExpandAt(S, IP)) {
637     LLVM_DEBUG(dbgs() << "INDVARS: Can not replace IV user: " << *I
638                       << " with non-speculable loop invariant: " << *S << '\n');
639     return false;
640   }
641 
642   auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP);
643 
644   I->replaceAllUsesWith(Invariant);
645   LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
646                     << " with loop invariant: " << *S << '\n');
647   ++NumFoldedUser;
648   Changed = true;
649   DeadInsts.emplace_back(I);
650   return true;
651 }
652 
/// Eliminate redundant type cast between integer and float.
/// Looks for fpto[su]i(..([su]itofp(iv))..) round-trips that provably fit in
/// the FP mantissa and rewrites them as pure integer casts of the IV.
bool SimplifyIndvar::replaceFloatIVWithIntegerIV(Instruction *UseInst) {
  if (UseInst->getOpcode() != CastInst::SIToFP &&
      UseInst->getOpcode() != CastInst::UIToFP)
    return false;

  Instruction *IVOperand = cast<Instruction>(UseInst->getOperand(0));
  // Get the symbolic expression for this instruction.
  const SCEV *IV = SE->getSCEV(IVOperand);
  // Number of significant bits the IV can occupy, per its signed/unsigned
  // range; the conversion is lossless only if this fits in the FP mantissa.
  unsigned MaskBits;
  if (UseInst->getOpcode() == CastInst::SIToFP)
    MaskBits = SE->getSignedRange(IV).getMinSignedBits();
  else
    MaskBits = SE->getUnsignedRange(IV).getActiveBits();
  unsigned DestNumSigBits = UseInst->getType()->getFPMantissaWidth();
  if (MaskBits <= DestNumSigBits) {
    for (User *U : UseInst->users()) {
      // Match for fptosi/fptoui of sitofp and with same type.
      auto *CI = dyn_cast<CastInst>(U);
      if (!CI)
        continue;

      CastInst::CastOps Opcode = CI->getOpcode();
      if (Opcode != CastInst::FPToSI && Opcode != CastInst::FPToUI)
        continue;

      Value *Conv = nullptr;
      if (IVOperand->getType() != CI->getType()) {
        IRBuilder<> Builder(CI);
        StringRef Name = IVOperand->getName();
        // To match InstCombine logic, we only need sext if both fptosi and
        // sitofp are used. If one of them is unsigned, then we can use zext.
        if (SE->getTypeSizeInBits(IVOperand->getType()) >
            SE->getTypeSizeInBits(CI->getType())) {
          Conv = Builder.CreateTrunc(IVOperand, CI->getType(), Name + ".trunc");
        } else if (Opcode == CastInst::FPToUI ||
                   UseInst->getOpcode() == CastInst::UIToFP) {
          Conv = Builder.CreateZExt(IVOperand, CI->getType(), Name + ".zext");
        } else {
          Conv = Builder.CreateSExt(IVOperand, CI->getType(), Name + ".sext");
        }
      } else
        Conv = IVOperand;

      CI->replaceAllUsesWith(Conv);
      DeadInsts.push_back(CI);
      LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *CI
                        << " with: " << *Conv << '\n');

      ++NumFoldedUser;
      Changed = true;
    }
  }

  // NOTE(review): this returns the member `Changed`, not a local flag, so it
  // reports true if *any* earlier simplification fired even when this call
  // rewrote nothing — confirm callers are happy with that before relying on
  // the return value.
  return Changed;
}
709 
/// Eliminate any operation that SCEV can prove is an identity function.
bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
                                           Instruction *IVOperand) {
  // Candidate only if both have the same type and the same SCEV expression.
  if (!SE->isSCEVable(UseInst->getType()) ||
      (UseInst->getType() != IVOperand->getType()) ||
      (SE->getSCEV(UseInst) != SE->getSCEV(IVOperand)))
    return false;

  // getSCEV(X) == getSCEV(Y) does not guarantee that X and Y are related in the
  // dominator tree, even if X is an operand to Y.  For instance, in
  //
  //     %iv = phi i32 {0,+,1}
  //     br %cond, label %left, label %merge
  //
  //   left:
  //     %X = add i32 %iv, 0
  //     br label %merge
  //
  //   merge:
  //     %M = phi (%X, %iv)
  //
  // getSCEV(%M) == getSCEV(%X) == {0,+,1}, but %X does not dominate %M, and
  // %M.replaceAllUsesWith(%X) would be incorrect.

  // If UseInst is not a PHI node then we know that IVOperand dominates
  // UseInst directly from the legality of SSA; PHIs need the explicit
  // dominance check described above.
  if (isa<PHINode>(UseInst))
    if (!DT || !DT->dominates(IVOperand, UseInst))
      return false;

  if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
    return false;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');

  SE->forgetValue(UseInst);
  UseInst->replaceAllUsesWith(IVOperand);
  ++NumElimIdentity;
  Changed = true;
  DeadInsts.emplace_back(UseInst);
  return true;
}
752 
753 bool SimplifyIndvar::strengthenBinaryOp(BinaryOperator *BO,
754                                         Instruction *IVOperand) {
755   return (isa<OverflowingBinaryOperator>(BO) &&
756           strengthenOverflowingOperation(BO, IVOperand)) ||
757          (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand));
758 }
759 
760 /// Annotate BO with nsw / nuw if it provably does not signed-overflow /
761 /// unsigned-overflow.  Returns true if anything changed, false otherwise.
762 bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
763                                                     Instruction *IVOperand) {
764   auto Flags = SE->getStrengthenedNoWrapFlagsFromBinOp(
765       cast<OverflowingBinaryOperator>(BO));
766 
767   if (!Flags)
768     return false;
769 
770   BO->setHasNoUnsignedWrap(ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) ==
771                            SCEV::FlagNUW);
772   BO->setHasNoSignedWrap(ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) ==
773                          SCEV::FlagNSW);
774 
775   // The getStrengthenedNoWrapFlagsFromBinOp() check inferred additional nowrap
776   // flags on addrecs while performing zero/sign extensions. We could call
777   // forgetValue() here to make sure those flags also propagate to any other
778   // SCEV expressions based on the addrec. However, this can have pathological
779   // compile-time impact, see https://bugs.llvm.org/show_bug.cgi?id=50384.
780   return true;
781 }
782 
783 /// Annotate the Shr in (X << IVOperand) >> C as exact using the
784 /// information from the IV's range. Returns true if anything changed, false
785 /// otherwise.
786 bool SimplifyIndvar::strengthenRightShift(BinaryOperator *BO,
787                                           Instruction *IVOperand) {
788   using namespace llvm::PatternMatch;
789 
790   if (BO->getOpcode() == Instruction::Shl) {
791     bool Changed = false;
792     ConstantRange IVRange = SE->getUnsignedRange(SE->getSCEV(IVOperand));
793     for (auto *U : BO->users()) {
794       const APInt *C;
795       if (match(U,
796                 m_AShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C))) ||
797           match(U,
798                 m_LShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C)))) {
799         BinaryOperator *Shr = cast<BinaryOperator>(U);
800         if (!Shr->isExact() && IVRange.getUnsignedMin().uge(*C)) {
801           Shr->setIsExact(true);
802           Changed = true;
803         }
804       }
805     }
806     return Changed;
807   }
808 
809   return false;
810 }
811 
812 /// Add all uses of Def to the current IV's worklist.
813 static void pushIVUsers(
814   Instruction *Def, Loop *L,
815   SmallPtrSet<Instruction*,16> &Simplified,
816   SmallVectorImpl< std::pair<Instruction*,Instruction*> > &SimpleIVUsers) {
817 
818   for (User *U : Def->users()) {
819     Instruction *UI = cast<Instruction>(U);
820 
821     // Avoid infinite or exponential worklist processing.
822     // Also ensure unique worklist users.
823     // If Def is a LoopPhi, it may not be in the Simplified set, so check for
824     // self edges first.
825     if (UI == Def)
826       continue;
827 
828     // Only change the current Loop, do not change the other parts (e.g. other
829     // Loops).
830     if (!L->contains(UI))
831       continue;
832 
833     // Do not push the same instruction more than once.
834     if (!Simplified.insert(UI).second)
835       continue;
836 
837     SimpleIVUsers.push_back(std::make_pair(UI, Def));
838   }
839 }
840 
841 /// Return true if this instruction generates a simple SCEV
842 /// expression in terms of that IV.
843 ///
844 /// This is similar to IVUsers' isInteresting() but processes each instruction
845 /// non-recursively when the operand is already known to be a simpleIVUser.
846 ///
847 static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) {
848   if (!SE->isSCEVable(I->getType()))
849     return false;
850 
851   // Get the symbolic expression for this instruction.
852   const SCEV *S = SE->getSCEV(I);
853 
854   // Only consider affine recurrences.
855   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
856   if (AR && AR->getLoop() == L)
857     return true;
858 
859   return false;
860 }
861 
/// Iteratively perform simplification on a worklist of users
/// of the specified induction variable. Each successive simplification may push
/// more users which may themselves be candidates for simplification.
///
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
/// top-down, updating the IR only when it encounters a clear optimization
/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
  if (!SE->isSCEVable(CurrIV->getType()))
    return;

  // Instructions processed by SimplifyIndvar for CurrIV.
  SmallPtrSet<Instruction*,16> Simplified;

  // Use-def pairs of IV users waiting to be processed for CurrIV.
  // Each pair is (user instruction, the IV operand it uses).
  SmallVector<std::pair<Instruction*, Instruction*>, 8> SimpleIVUsers;

  // Push users of the current LoopPhi. In rare cases, pushIVUsers may be
  // called multiple times for the same LoopPhi. This is the proper thing to
  // do for loop header phis that use each other.
  pushIVUsers(CurrIV, L, Simplified, SimpleIVUsers);

  while (!SimpleIVUsers.empty()) {
    std::pair<Instruction*, Instruction*> UseOper =
      SimpleIVUsers.pop_back_val();
    Instruction *UseInst = UseOper.first;

    // If a user of the IndVar is trivially dead, we prefer just to mark it dead
    // rather than try to do some complex analysis or transformation (such as
    // widening) basing on it.
    // TODO: Propagate TLI and pass it here to handle more cases.
    if (isInstructionTriviallyDead(UseInst, /* TLI */ nullptr)) {
      DeadInsts.emplace_back(UseInst);
      continue;
    }

    // Bypass back edges to avoid extra work.
    if (UseInst == CurrIV) continue;

    // Try to replace UseInst with a loop invariant before any other
    // simplifications.
    if (replaceIVUserWithLoopInvariant(UseInst))
      continue;

    // Go further for the bitcast 'ptrtoint ptr to i64': also try to replace
    // its direct users with loop invariants.
    if (isa<PtrToIntInst>(UseInst))
      for (Use &U : UseInst->uses()) {
        Instruction *User = cast<Instruction>(U.getUser());
        if (replaceIVUserWithLoopInvariant(User))
          break; // done replacing
      }

    // Repeatedly fold the IV operand into UseInst; foldIVUser yields the next
    // IV operand to try, or null (or a non-instruction) when folding stops.
    Instruction *IVOperand = UseOper.second;
    for (unsigned N = 0; IVOperand; ++N) {
      assert(N <= Simplified.size() && "runaway iteration");
      (void) N;

      Value *NewOper = foldIVUser(UseInst, IVOperand);
      if (!NewOper)
        break; // done folding
      IVOperand = dyn_cast<Instruction>(NewOper);
    }
    if (!IVOperand)
      continue;

    if (eliminateIVUser(UseInst, IVOperand)) {
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseInst)) {
      if (strengthenBinaryOp(BO, IVOperand)) {
        // re-queue uses of the now modified binary operator and fall
        // through to the checks that remain.
        pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      }
    }

    // Try to use integer induction for FPToSI of float induction directly.
    if (replaceFloatIVWithIntegerIV(UseInst)) {
      // Re-queue the potentially new direct uses of IVOperand.
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    // Let the client (e.g. the IV widener) inspect casts of the IV.
    CastInst *Cast = dyn_cast<CastInst>(UseInst);
    if (V && Cast) {
      V->visitCast(Cast);
      continue;
    }
    // Keep walking the def-use chain while the users remain affine addrecs.
    if (isSimpleIVUser(UseInst, L, SE)) {
      pushIVUsers(UseInst, L, Simplified, SimpleIVUsers);
    }
  }
}
962 
963 namespace llvm {
964 
965 void IVVisitor::anchor() { }
966 
967 /// Simplify instructions that use this induction variable
968 /// by using ScalarEvolution to analyze the IV's recurrence.
969 bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
970                        LoopInfo *LI, const TargetTransformInfo *TTI,
971                        SmallVectorImpl<WeakTrackingVH> &Dead,
972                        SCEVExpander &Rewriter, IVVisitor *V) {
973   SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, TTI,
974                      Rewriter, Dead);
975   SIV.simplifyUsers(CurrIV, V);
976   return SIV.hasChanged();
977 }
978 
979 /// Simplify users of induction variables within this
980 /// loop. This does not actually change or add IVs.
981 bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
982                      LoopInfo *LI, const TargetTransformInfo *TTI,
983                      SmallVectorImpl<WeakTrackingVH> &Dead) {
984   SCEVExpander Rewriter(*SE, SE->getDataLayout(), "indvars");
985 #ifndef NDEBUG
986   Rewriter.setDebugType(DEBUG_TYPE);
987 #endif
988   bool Changed = false;
989   for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
990     Changed |=
991         simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, TTI, Dead, Rewriter);
992   }
993   return Changed;
994 }
995 
996 } // namespace llvm
997 
namespace {
//===----------------------------------------------------------------------===//
// Widen Induction Variables - Extend the width of an IV to cover its
// widest uses.
//===----------------------------------------------------------------------===//

class WidenIV {
  // Parameters
  PHINode *OrigPhi;
  Type *WideType;

  // Context
  LoopInfo        *LI;
  Loop            *L;
  ScalarEvolution *SE;
  DominatorTree   *DT;

  // Does the module have any calls to the llvm.experimental.guard intrinsic
  // at all? If not we can avoid scanning instructions looking for guards.
  bool HasGuards;

  // Whether to use control-dependent range info for post-increment IV uses
  // (see PostIncRangeInfos below).
  bool UsePostIncrementRanges;

  // Statistics
  unsigned NumElimExt = 0;
  unsigned NumWidened = 0;

  // Result
  PHINode *WidePhi = nullptr;
  Instruction *WideInc = nullptr;
  const SCEV *WideIncExpr = nullptr;
  // Instructions made dead by widening; the caller is expected to delete them.
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  // Narrow instructions that have already been widened.
  SmallPtrSet<Instruction *,16> Widened;

  enum class ExtendKind { Zero, Sign, Unknown };

  // A map tracking the kind of extension used to widen each narrow IV
  // and narrow IV user.
  // Key: pointer to a narrow IV or IV user.
  // Value: the kind of extension used to widen this Instruction.
  DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap;

  using DefUserPair = std::pair<AssertingVH<Value>, AssertingVH<Instruction>>;

  // A map with control-dependent ranges for post increment IV uses. The key is
  // a pair of IV def and a use of this def denoting the context. The value is
  // a ConstantRange representing possible values of the def at the given
  // context.
  DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos;

  // Look up the recorded range for Def in the context of UseI, if any.
  std::optional<ConstantRange> getPostIncRangeInfo(Value *Def,
                                                   Instruction *UseI) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    return It == PostIncRangeInfos.end()
               ? std::optional<ConstantRange>(std::nullopt)
               : std::optional<ConstantRange>(It->second);
  }

  void calculatePostIncRanges(PHINode *OrigPhi);
  void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser);

  // Record range R for Def in the context of UseI, intersecting with any
  // previously recorded range for the same (Def, UseI) pair.
  void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    if (It == PostIncRangeInfos.end())
      PostIncRangeInfos.insert({Key, R});
    else
      It->second = R.intersectWith(It->second);
  }

public:
  /// Record a link in the Narrow IV def-use chain along with the WideIV that
  /// computes the same value as the Narrow IV def.  This avoids caching Use*
  /// pointers.
  struct NarrowIVDefUse {
    Instruction *NarrowDef = nullptr;
    Instruction *NarrowUse = nullptr;
    Instruction *WideDef = nullptr;

    // True if the narrow def is never negative.  Tracking this information lets
    // us use a sign extension instead of a zero extension or vice versa, when
    // profitable and legal.
    bool NeverNegative = false;

    NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                   bool NeverNegative)
        : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
          NeverNegative(NeverNegative) {}
  };

  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
          bool HasGuards, bool UsePostIncrementRanges = true);

  /// Main entry point: build the wide IV and rewrite the narrow IV's users.
  PHINode *createWideIV(SCEVExpander &Rewriter);

  unsigned getNumElimExt() { return NumElimExt; };
  unsigned getNumWidened() { return NumWidened; };

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  ExtendKind getExtendKind(Instruction *I);

  // The widened recurrence for a narrow use, paired with the extension kind
  // that produced it; {nullptr, Unknown} when widening is not possible.
  using WidenedRecTy = std::pair<const SCEVAddRecExpr *, ExtendKind>;

  WidenedRecTy getWideRecurrence(NarrowIVDefUse DU);

  WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool widenLoopCompare(NarrowIVDefUse DU);
  bool widenWithVariantUse(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);

private:
  // Worklist of narrow def-use links still to be visited by createWideIV.
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
};
} // namespace
1130 
1131 /// Determine the insertion point for this user. By default, insert immediately
1132 /// before the user. SCEVExpander or LICM will hoist loop invariants out of the
1133 /// loop. For PHI nodes, there may be multiple uses, so compute the nearest
1134 /// common dominator for the incoming blocks. A nullptr can be returned if no
1135 /// viable location is found: it may happen if User is a PHI and Def only comes
1136 /// to this PHI from unreachable blocks.
1137 static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
1138                                           DominatorTree *DT, LoopInfo *LI) {
1139   PHINode *PHI = dyn_cast<PHINode>(User);
1140   if (!PHI)
1141     return User;
1142 
1143   Instruction *InsertPt = nullptr;
1144   for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
1145     if (PHI->getIncomingValue(i) != Def)
1146       continue;
1147 
1148     BasicBlock *InsertBB = PHI->getIncomingBlock(i);
1149 
1150     if (!DT->isReachableFromEntry(InsertBB))
1151       continue;
1152 
1153     if (!InsertPt) {
1154       InsertPt = InsertBB->getTerminator();
1155       continue;
1156     }
1157     InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
1158     InsertPt = InsertBB->getTerminator();
1159   }
1160 
1161   // If we have skipped all inputs, it means that Def only comes to Phi from
1162   // unreachable blocks.
1163   if (!InsertPt)
1164     return nullptr;
1165 
1166   auto *DefI = dyn_cast<Instruction>(Def);
1167   if (!DefI)
1168     return InsertPt;
1169 
1170   assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");
1171 
1172   auto *L = LI->getLoopFor(DefI->getParent());
1173   assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));
1174 
1175   for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
1176     if (LI->getLoopFor(DTN->getBlock()) == L)
1177       return DTN->getBlock()->getTerminator();
1178 
1179   llvm_unreachable("DefI dominates InsertPt!");
1180 }
1181 
1182 WidenIV::WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
1183           DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
1184           bool HasGuards, bool UsePostIncrementRanges)
1185       : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
1186         L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
1187         HasGuards(HasGuards), UsePostIncrementRanges(UsePostIncrementRanges),
1188         DeadInsts(DI) {
1189     assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
1190     ExtendKindMap[OrigPhi] = WI.IsSigned ? ExtendKind::Sign : ExtendKind::Zero;
1191 }
1192 
1193 Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
1194                                  bool IsSigned, Instruction *Use) {
1195   // Set the debug location and conservative insertion point.
1196   IRBuilder<> Builder(Use);
1197   // Hoist the insertion point into loop preheaders as far as possible.
1198   for (const Loop *L = LI->getLoopFor(Use->getParent());
1199        L && L->getLoopPreheader() && L->isLoopInvariant(NarrowOper);
1200        L = L->getParentLoop())
1201     Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
1202 
1203   return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
1204                     Builder.CreateZExt(NarrowOper, WideType);
1205 }
1206 
1207 /// Instantiate a wide operation to replace a narrow operation. This only needs
1208 /// to handle operations that can evaluation to SCEVAddRec. It can safely return
1209 /// 0 for any operation we decide not to clone.
1210 Instruction *WidenIV::cloneIVUser(WidenIV::NarrowIVDefUse DU,
1211                                   const SCEVAddRecExpr *WideAR) {
1212   unsigned Opcode = DU.NarrowUse->getOpcode();
1213   switch (Opcode) {
1214   default:
1215     return nullptr;
1216   case Instruction::Add:
1217   case Instruction::Mul:
1218   case Instruction::UDiv:
1219   case Instruction::Sub:
1220     return cloneArithmeticIVUser(DU, WideAR);
1221 
1222   case Instruction::And:
1223   case Instruction::Or:
1224   case Instruction::Xor:
1225   case Instruction::Shl:
1226   case Instruction::LShr:
1227   case Instruction::AShr:
1228     return cloneBitwiseIVUser(DU);
1229   }
1230 }
1231 
1232 Instruction *WidenIV::cloneBitwiseIVUser(WidenIV::NarrowIVDefUse DU) {
1233   Instruction *NarrowUse = DU.NarrowUse;
1234   Instruction *NarrowDef = DU.NarrowDef;
1235   Instruction *WideDef = DU.WideDef;
1236 
1237   LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");
1238 
1239   // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
1240   // about the narrow operand yet so must insert a [sz]ext. It is probably loop
1241   // invariant and will be folded or hoisted. If it actually comes from a
1242   // widened IV, it should be removed during a future call to widenIVUse.
1243   bool IsSigned = getExtendKind(NarrowDef) == ExtendKind::Sign;
1244   Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
1245                    ? WideDef
1246                    : createExtendInst(NarrowUse->getOperand(0), WideType,
1247                                       IsSigned, NarrowUse);
1248   Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
1249                    ? WideDef
1250                    : createExtendInst(NarrowUse->getOperand(1), WideType,
1251                                       IsSigned, NarrowUse);
1252 
1253   auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
1254   auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
1255                                         NarrowBO->getName());
1256   IRBuilder<> Builder(NarrowUse);
1257   Builder.Insert(WideBO);
1258   WideBO->copyIRFlags(NarrowBO);
1259   return WideBO;
1260 }
1261 
// Clone a narrow add/sub/mul/udiv whose widened form is known (via SCEV) to
// equal WideAR, choosing sign- or zero-extension for the non-IV operand by
// checking which choice reproduces WideAR.
Instruction *WidenIV::cloneArithmeticIVUser(WidenIV::NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Which operand of NarrowUse is the IV def; the other one is "NonIVNarrowDef"
  // below.
  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;

  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef),
  // and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    // Pick sext or zext per the current guess.
    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    // Extend only the non-IV operand; the IV side is already WideDef.
    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse =
      getSCEVByOpCode(WideLHS, WideRHS, NarrowUse->getOpcode());

    return WideUse == WideAR;
  };

  // Try the extension matching the IV's own widening first, then the other.
  bool SignExtend = getExtendKind(NarrowDef) == ExtendKind::Sign;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  // Materialize the wide operands: WideDef for the IV side, an extend for the
  // other side.
  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());

  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  // Carry the narrow operation's IR flags (nsw/nuw/exact) to the wide clone.
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}
1333 
1334 WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) {
1335   auto It = ExtendKindMap.find(I);
1336   assert(It != ExtendKindMap.end() && "Instruction not yet extended!");
1337   return It->second;
1338 }
1339 
1340 const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
1341                                      unsigned OpCode) const {
1342   switch (OpCode) {
1343   case Instruction::Add:
1344     return SE->getAddExpr(LHS, RHS);
1345   case Instruction::Sub:
1346     return SE->getMinusSCEV(LHS, RHS);
1347   case Instruction::Mul:
1348     return SE->getMulExpr(LHS, RHS);
1349   case Instruction::UDiv:
1350     return SE->getUDivExpr(LHS, RHS);
1351   default:
1352     llvm_unreachable("Unsupported opcode.");
1353   };
1354 }
1355 
/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return the AddRec and the kind of
/// extension used.
WidenIV::WidenedRecTy
WidenIV::getExtendedOperandRecurrence(WidenIV::NarrowIVDefUse DU) {
  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return {nullptr, ExtendKind::Unknown};

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  // Extending the non-IV operand is only justified by a matching no-wrap flag
  // on the narrow operation: nsw allows sext, nuw allows zext.
  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(DU.NarrowUse);
  ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
  if (ExtKind == ExtendKind::Sign && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (ExtKind == ExtendKind::Zero && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return {nullptr, ExtendKind::Unknown};

  // When creating this SCEV expr, don't apply the current operations NSW or NUW
  // flags. This instruction may be guarded by control flow that the no-wrap
  // behavior depends on. Non-control-equivalent instructions can be mapped to
  // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
  // semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode));

  // The widened operation must still be an addrec on this loop to be useful.
  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, ExtendKind::Unknown};

  return {AddRec, ExtKind};
}
1409 
1410 /// Is this instruction potentially interesting for further simplification after
1411 /// widening it's type? In other words, can the extend be safely hoisted out of
1412 /// the loop with SCEV reducing the value to a recurrence on the same loop. If
1413 /// so, return the extended recurrence and the kind of extension used. Otherwise
1414 /// return {nullptr, ExtendKind::Unknown}.
1415 WidenIV::WidenedRecTy WidenIV::getWideRecurrence(WidenIV::NarrowIVDefUse DU) {
1416   if (!DU.NarrowUse->getType()->isIntegerTy())
1417     return {nullptr, ExtendKind::Unknown};
1418 
1419   const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse);
1420   if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
1421       SE->getTypeSizeInBits(WideType)) {
1422     // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
1423     // index. So don't follow this use.
1424     return {nullptr, ExtendKind::Unknown};
1425   }
1426 
1427   const SCEV *WideExpr;
1428   ExtendKind ExtKind;
1429   if (DU.NeverNegative) {
1430     WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
1431     if (isa<SCEVAddRecExpr>(WideExpr))
1432       ExtKind = ExtendKind::Sign;
1433     else {
1434       WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
1435       ExtKind = ExtendKind::Zero;
1436     }
1437   } else if (getExtendKind(DU.NarrowDef) == ExtendKind::Sign) {
1438     WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
1439     ExtKind = ExtendKind::Sign;
1440   } else {
1441     WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
1442     ExtKind = ExtendKind::Zero;
1443   }
1444   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
1445   if (!AddRec || AddRec->getLoop() != L)
1446     return {nullptr, ExtendKind::Unknown};
1447   return {AddRec, ExtKind};
1448 }
1449 
1450 /// This IV user cannot be widened. Replace this use of the original narrow IV
1451 /// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
1452 static void truncateIVUse(WidenIV::NarrowIVDefUse DU, DominatorTree *DT,
1453                           LoopInfo *LI) {
1454   auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
1455   if (!InsertPt)
1456     return;
1457   LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
1458                     << *DU.NarrowUse << "\n");
1459   IRBuilder<> Builder(InsertPt);
1460   Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
1461   DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
1462 }
1463 
/// If the narrow use is a compare instruction, then widen the compare
/// (and possibly the other operand).  The extend operation is hoisted into the
/// loop preheader as far as possible.
bool WidenIV::widenLoopCompare(WidenIV::NarrowIVDefUse DU) {
  ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  if (!Cmp)
    return false;

  // We can legally widen the comparison in the following two cases:
  //
  //  - The signedness of the IV extension and comparison match
  //
  //  - The narrow IV is always positive (and thus its sign extension is equal
  //    to its zero extension).  For instance, let's say we're zero extending
  //    %narrow for the following use
  //
  //      icmp slt i32 %narrow, %val   ... (A)
  //
  //    and %narrow is always positive.  Then
  //
  //      (A) == icmp slt i32 sext(%narrow), sext(%val)
  //          == icmp slt i32 zext(%narrow), sext(%val)
  bool IsSigned = getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
  if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
    return false;

  // The operand of the compare that is not the narrow IV def.
  Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");

  // Widen the compare instruction.
  auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  if (!InsertPt)
    return false;
  IRBuilder<> Builder(InsertPt);
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);

  // Widen the other operand of the compare, if necessary.  Its extension
  // matches the compare's own signedness.
  if (CastWidth < IVWidth) {
    Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
    DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  }
  return true;
}
1509 
1510 // The widenIVUse avoids generating trunc by evaluating the use as AddRec, this
1511 // will not work when:
1512 //    1) SCEV traces back to an instruction inside the loop that SCEV can not
1513 // expand, eg. add %indvar, (load %addr)
1514 //    2) SCEV finds a loop variant, eg. add %indvar, %loopvariant
1515 // While SCEV fails to avoid trunc, we can still try to use instruction
1516 // combining approach to prove trunc is not required. This can be further
1517 // extended with other instruction combining checks, but for now we handle the
// following case (sub can be "add" and "mul", "nsw + sext" can be "nuw + zext")
1519 //
1520 // Src:
1521 //   %c = sub nsw %b, %indvar
1522 //   %d = sext %c to i64
1523 // Dst:
1524 //   %indvar.ext1 = sext %indvar to i64
1525 //   %m = sext %b to i64
1526 //   %d = sub nsw i64 %m, %indvar.ext1
1527 // Therefore, as long as the result of add/sub/mul is extended to wide type, no
1528 // trunc is required regardless of how %b is generated. This pattern is common
1529 // when calculating address in 64 bit architecture
bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return false;

  // The operand that is not defined by NarrowDef of DU. Let's call it the
  // other operand.
  assert((NarrowUse->getOperand(0) == NarrowDef ||
          NarrowUse->getOperand(1) == NarrowDef) &&
         "bad DU");

  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(NarrowUse);
  ExtendKind ExtKind = getExtendKind(NarrowDef);
  // Widening is directly justified when the no-wrap flag matches the extension
  // kind used for the narrow def: sext requires nsw, zext requires nuw.
  bool CanSignExtend = ExtKind == ExtendKind::Sign && OBO->hasNoSignedWrap();
  bool CanZeroExtend = ExtKind == ExtendKind::Zero && OBO->hasNoUnsignedWrap();
  auto AnotherOpExtKind = ExtKind;

  // Check that all uses are either:
  // - narrow def (in case we are widening the IV increment);
  // - single-input LCSSA Phis;
  // - comparison of the chosen type;
  // - extend of the chosen type (raison d'etre).
  SmallVector<Instruction *, 4> ExtUsers;
  SmallVector<PHINode *, 4> LCSSAPhiUsers;
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (Use &U : NarrowUse->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());
    if (User == NarrowDef)
      continue;
    if (!L->contains(User)) {
      // A use outside the loop must be an LCSSA phi (cast asserts this).
      auto *LCSSAPhi = cast<PHINode>(User);
      // Make sure there is only 1 input, so that we don't have to split
      // critical edges.
      if (LCSSAPhi->getNumOperands() != 1)
        return false;
      LCSSAPhiUsers.push_back(LCSSAPhi);
      continue;
    }
    if (auto *ICmp = dyn_cast<ICmpInst>(User)) {
      auto Pred = ICmp->getPredicate();
      // We have 3 types of predicates: signed, unsigned and equality
      // predicates. For equality, it's legal to widen icmp for either sign and
      // zero extend. For sign extend, we can also do so for signed predicates,
      // likewise for zero extend we can widen icmp for unsigned predicates.
      if (ExtKind == ExtendKind::Zero && ICmpInst::isSigned(Pred))
        return false;
      if (ExtKind == ExtendKind::Sign && ICmpInst::isUnsigned(Pred))
        return false;
      ICmpUsers.push_back(ICmp);
      continue;
    }
    // Any remaining in-loop user must be an extend of the matching kind to
    // the wide type, otherwise we cannot avoid the trunc.
    if (ExtKind == ExtendKind::Sign)
      User = dyn_cast<SExtInst>(User);
    else
      User = dyn_cast<ZExtInst>(User);
    if (!User || User->getType() != WideType)
      return false;
    ExtUsers.push_back(User);
  }
  if (ExtUsers.empty()) {
    DeadInsts.emplace_back(NarrowUse);
    return true;
  }

  // We'll prove some facts that should be true in the context of ext users. If
  // there is no users, we are done now. If there are some, pick their common
  // dominator as context.
  const Instruction *CtxI = findCommonDominator(ExtUsers, *DT);

  if (!CanSignExtend && !CanZeroExtend) {
    // Because InstCombine turns 'sub nuw' to 'add' losing the no-wrap flag, we
    // will most likely not see it. Let's try to prove it.
    if (OpCode != Instruction::Add)
      return false;
    if (ExtKind != ExtendKind::Zero)
      return false;
    const SCEV *LHS = SE->getSCEV(OBO->getOperand(0));
    const SCEV *RHS = SE->getSCEV(OBO->getOperand(1));
    // TODO: Support case for NarrowDef = NarrowUse->getOperand(1).
    if (NarrowUse->getOperand(0) != NarrowDef)
      return false;
    if (!SE->isKnownNegative(RHS))
      return false;
    // Prove LHS >= -RHS at the ext users' context, i.e. the subtraction
    // cannot wrap below zero.
    bool ProvedSubNUW = SE->isKnownPredicateAt(ICmpInst::ICMP_UGE, LHS,
                                               SE->getNegativeSCEV(RHS), CtxI);
    if (!ProvedSubNUW)
      return false;
    // In fact, our 'add' is 'sub nuw'. We will need to widen the 2nd operand as
    // neg(zext(neg(op))), which is basically sext(op).
    AnotherOpExtKind = ExtendKind::Sign;
  }

  // Verifying that Defining operand is an AddRec
  const SCEV *Op1 = SE->getSCEV(WideDef);
  const SCEVAddRecExpr *AddRecOp1 = dyn_cast<SCEVAddRecExpr>(Op1);
  if (!AddRecOp1 || AddRecOp1->getLoop() != L)
    return false;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Generating a widening use instruction. The IV operand is replaced by the
  // already-widened def; the other operand gets an explicit extend.
  Value *LHS =
      (NarrowUse->getOperand(0) == NarrowDef)
          ? WideDef
          : createExtendInst(NarrowUse->getOperand(0), WideType,
                             AnotherOpExtKind == ExtendKind::Sign, NarrowUse);
  Value *RHS =
      (NarrowUse->getOperand(1) == NarrowDef)
          ? WideDef
          : createExtendInst(NarrowUse->getOperand(1), WideType,
                             AnotherOpExtKind == ExtendKind::Sign, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  ExtendKindMap[NarrowUse] = ExtKind;

  // Replace each extend user directly with the wide binary operator.
  for (Instruction *User : ExtUsers) {
    assert(User->getType() == WideType && "Checked before!");
    LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *User << " replaced by "
                      << *WideBO << "\n");
    ++NumElimExt;
    User->replaceAllUsesWith(WideBO);
    DeadInsts.emplace_back(User);
  }

  // Rewrite each single-input LCSSA phi: feed it the wide value, then truncate
  // back to the narrow type for its remaining users.
  for (PHINode *User : LCSSAPhiUsers) {
    assert(User->getNumOperands() == 1 && "Checked before!");
    Builder.SetInsertPoint(User);
    auto *WidePN =
        Builder.CreatePHI(WideBO->getType(), 1, User->getName() + ".wide");
    BasicBlock *LoopExitingBlock = User->getParent()->getSinglePredecessor();
    assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
           "Not a LCSSA Phi?");
    WidePN->addIncoming(WideBO, LoopExitingBlock);
    Builder.SetInsertPoint(&*User->getParent()->getFirstInsertionPt());
    auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
    User->replaceAllUsesWith(TruncPN);
    DeadInsts.emplace_back(User);
  }

  // Rewrite compares in the wide type; the non-IV operand is extended with
  // the same kind as the narrow def was.
  for (ICmpInst *User : ICmpUsers) {
    Builder.SetInsertPoint(User);
    auto ExtendedOp = [&](Value * V)->Value * {
      if (V == NarrowUse)
        return WideBO;
      if (ExtKind == ExtendKind::Zero)
        return Builder.CreateZExt(V, WideBO->getType());
      else
        return Builder.CreateSExt(V, WideBO->getType());
    };
    auto Pred = User->getPredicate();
    auto *LHS = ExtendedOp(User->getOperand(0));
    auto *RHS = ExtendedOp(User->getOperand(1));
    auto *WideCmp =
        Builder.CreateICmp(Pred, LHS, RHS, User->getName() + ".wide");
    User->replaceAllUsesWith(WideCmp);
    DeadInsts.emplace_back(User);
  }

  return true;
}
1703 
/// Determine whether an individual user of the narrow IV can be widened. If so,
/// return the wide clone of the user.
Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU, SCEVExpander &Rewriter) {
  assert(ExtendKindMap.count(DU.NarrowDef) &&
         "Should already know the kind of extension used to widen NarrowDef");

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT, LI);
      else {
        // Widening the PHI requires us to insert a trunc.  The logical place
        // for this trunc is in the same BB as the PHI.  This is not possible if
        // the BB is terminated by a catchswitch.
        if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator()))
          return nullptr;

        PHINode *WidePhi =
          PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
                          UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.emplace_back(UsePhi);
        LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
                          << *WidePhi << "\n");
      }
      return nullptr;
    }
  }

  // This narrow use can be widened by a sext if it's non-negative or its narrow
  // def was widened by a sext. Same for zext.
  auto canWidenBySExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
  };
  auto canWidenByZExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Zero;
  };

  // Our raison d'etre! Eliminate sign and zero extension.
  if ((isa<SExtInst>(DU.NarrowUse) && canWidenBySExt()) ||
      (isa<ZExtInst>(DU.NarrowUse) && canWidenByZExt())) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      }
      else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
                          << " not wide enough to subsume " << *DU.NarrowUse
                          << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
                        << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.emplace_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform SimplifyIVUsers
    // of the new users, because their parent IV will be processed later as a
    // new loop phi. If we preserved IVUsers analysis, we would also want to
    // push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return nullptr;
  }

  // Does this user itself evaluate to a recurrence after widening?
  WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU);
  if (!WideAddRec.first)
    WideAddRec = getWideRecurrence(DU);

  // A null wide recurrence and an Unknown extend kind must coincide.
  assert((WideAddRec.first == nullptr) ==
         (WideAddRec.second == ExtendKind::Unknown));
  if (!WideAddRec.first) {
    // If use is a loop condition, try to promote the condition instead of
    // truncating the IV first.
    if (widenLoopCompare(DU))
      return nullptr;

    // We are here about to generate a truncate instruction that may hurt
    // performance because the scalar evolution expression computed earlier
    // in WideAddRec.first does not indicate a polynomial induction expression.
    // In that case, look at the operands of the use instruction to determine
    // if we can still widen the use instead of truncating its operand.
    if (widenWithVariantUse(DU))
      return nullptr;

    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT, LI);
    return nullptr;
  }

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = nullptr;
  if (WideAddRec.first == WideIncExpr &&
      Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    // Otherwise clone the narrow user into a wide instruction.
    WideUse = cloneIVUser(DU, WideAddRec.first);
    if (!WideUse)
      return nullptr;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide use
  // evaluates to the same expression as the extended narrow use, but doesn't
  // absolutely guarantee it. Hence the following failsafe check. In rare cases
  // where it fails, we simply throw away the newly created wide use.
  if (WideAddRec.first != SE->getSCEV(WideUse)) {
    LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
                      << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
                      << "\n");
    DeadInsts.emplace_back(WideUse);
    return nullptr;
  }

  // if we reached this point then we are going to replace
  // DU.NarrowUse with WideUse. Reattach DbgValue then.
  replaceAllDbgUsesWith(*DU.NarrowUse, *WideUse, *WideUse, *DT);

  // Remember how the use was widened so its own users can be widened in turn.
  ExtendKindMap[DU.NarrowUse] = WideAddRec.second;
  // Returning WideUse pushes it on the worklist.
  return WideUse;
}
1849 
1850 /// Add eligible users of NarrowDef to NarrowIVUsers.
1851 void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
1852   const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
1853   bool NonNegativeDef =
1854       SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
1855                            SE->getZero(NarrowSCEV->getType()));
1856   for (User *U : NarrowDef->users()) {
1857     Instruction *NarrowUser = cast<Instruction>(U);
1858 
1859     // Handle data flow merges and bizarre phi cycles.
1860     if (!Widened.insert(NarrowUser).second)
1861       continue;
1862 
1863     bool NonNegativeUse = false;
1864     if (!NonNegativeDef) {
1865       // We might have a control-dependent range information for this context.
1866       if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser))
1867         NonNegativeUse = RangeInfo->getSignedMin().isNonNegative();
1868     }
1869 
1870     NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef,
1871                                NonNegativeDef || NonNegativeUse);
1872   }
1873 }
1874 
1875 /// Process a single induction variable. First use the SCEVExpander to create a
1876 /// wide induction variable that evaluates to the same recurrence as the
1877 /// original narrow IV. Then use a worklist to forward traverse the narrow IV's
1878 /// def-use chain. After widenIVUse has processed all interesting IV users, the
1879 /// narrow IV will be isolated for removal by DeleteDeadPHIs.
1880 ///
1881 /// It would be simpler to delete uses as they are processed, but we must avoid
1882 /// invalidating SCEV expressions.
1883 PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
1884   // Is this phi an induction variable?
1885   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
1886   if (!AddRec)
1887     return nullptr;
1888 
1889   // Widen the induction variable expression.
1890   const SCEV *WideIVExpr = getExtendKind(OrigPhi) == ExtendKind::Sign
1891                                ? SE->getSignExtendExpr(AddRec, WideType)
1892                                : SE->getZeroExtendExpr(AddRec, WideType);
1893 
1894   assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
1895          "Expect the new IV expression to preserve its type");
1896 
1897   // Can the IV be extended outside the loop without overflow?
1898   AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
1899   if (!AddRec || AddRec->getLoop() != L)
1900     return nullptr;
1901 
1902   // An AddRec must have loop-invariant operands. Since this AddRec is
1903   // materialized by a loop header phi, the expression cannot have any post-loop
1904   // operands, so they must dominate the loop header.
1905   assert(
1906       SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
1907       SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
1908       "Loop header phi recurrence inputs do not dominate the loop");
1909 
1910   // Iterate over IV uses (including transitive ones) looking for IV increments
1911   // of the form 'add nsw %iv, <const>'. For each increment and each use of
1912   // the increment calculate control-dependent range information basing on
1913   // dominating conditions inside of the loop (e.g. a range check inside of the
1914   // loop). Calculated ranges are stored in PostIncRangeInfos map.
1915   //
1916   // Control-dependent range information is later used to prove that a narrow
1917   // definition is not negative (see pushNarrowIVUsers). It's difficult to do
1918   // this on demand because when pushNarrowIVUsers needs this information some
1919   // of the dominating conditions might be already widened.
1920   if (UsePostIncrementRanges)
1921     calculatePostIncRanges(OrigPhi);
1922 
1923   // The rewriter provides a value for the desired IV expression. This may
1924   // either find an existing phi or materialize a new one. Either way, we
1925   // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
1926   // of the phi-SCC dominates the loop entry.
1927   Instruction *InsertPt = &*L->getHeader()->getFirstInsertionPt();
1928   Value *ExpandInst = Rewriter.expandCodeFor(AddRec, WideType, InsertPt);
1929   // If the wide phi is not a phi node, for example a cast node, like bitcast,
1930   // inttoptr, ptrtoint, just skip for now.
1931   if (!(WidePhi = dyn_cast<PHINode>(ExpandInst))) {
1932     // if the cast node is an inserted instruction without any user, we should
1933     // remove it to make sure the pass don't touch the function as we can not
1934     // wide the phi.
1935     if (ExpandInst->hasNUses(0) &&
1936         Rewriter.isInsertedInstruction(cast<Instruction>(ExpandInst)))
1937       DeadInsts.emplace_back(ExpandInst);
1938     return nullptr;
1939   }
1940 
1941   // Remembering the WideIV increment generated by SCEVExpander allows
1942   // widenIVUse to reuse it when widening the narrow IV's increment. We don't
1943   // employ a general reuse mechanism because the call above is the only call to
1944   // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
1945   if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1946     WideInc =
1947       cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
1948     WideIncExpr = SE->getSCEV(WideInc);
1949     // Propagate the debug location associated with the original loop increment
1950     // to the new (widened) increment.
1951     auto *OrigInc =
1952       cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
1953     WideInc->setDebugLoc(OrigInc->getDebugLoc());
1954   }
1955 
1956   LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
1957   ++NumWidened;
1958 
1959   // Traverse the def-use chain using a worklist starting at the original IV.
1960   assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state" );
1961 
1962   Widened.insert(OrigPhi);
1963   pushNarrowIVUsers(OrigPhi, WidePhi);
1964 
1965   while (!NarrowIVUsers.empty()) {
1966     WidenIV::NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();
1967 
1968     // Process a def-use edge. This may replace the use, so don't hold a
1969     // use_iterator across it.
1970     Instruction *WideUse = widenIVUse(DU, Rewriter);
1971 
1972     // Follow all def-use edges from the previous narrow use.
1973     if (WideUse)
1974       pushNarrowIVUsers(DU.NarrowUse, WideUse);
1975 
1976     // widenIVUse may have removed the def-use edge.
1977     if (DU.NarrowDef->use_empty())
1978       DeadInsts.emplace_back(DU.NarrowDef);
1979   }
1980 
1981   // Attach any debug information to the new PHI.
1982   replaceAllDbgUsesWith(*OrigPhi, *WidePhi, *WidePhi, *DT);
1983 
1984   return WidePhi;
1985 }
1986 
/// Calculates control-dependent range for the given def at the given context
/// by looking at dominating conditions inside of the loop
void WidenIV::calculatePostIncRange(Instruction *NarrowDef,
                                    Instruction *NarrowUser) {
  using namespace llvm::PatternMatch;

  // Only handle defs of the shape 'add nsw %lhs, C' with a non-negative
  // constant C; anything else yields no range information.
  Value *NarrowDefLHS;
  const APInt *NarrowDefRHS;
  if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS),
                                 m_APInt(NarrowDefRHS))) ||
      !NarrowDefRHS->isNonNegative())
    return;

  // Given a condition that dominates NarrowUser (taken when TrueDest is true),
  // derive a range for NarrowDef = NarrowDefLHS + NarrowDefRHS and record it.
  auto UpdateRangeFromCondition = [&] (Value *Condition,
                                       bool TrueDest) {
    CmpInst::Predicate Pred;
    Value *CmpRHS;
    if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS),
                                 m_Value(CmpRHS))))
      return;

    // On the false edge the inverse predicate holds.
    CmpInst::Predicate P =
            TrueDest ? Pred : CmpInst::getInversePredicate(Pred);

    auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS));
    auto CmpConstrainedLHSRange =
            ConstantRange::makeAllowedICmpRegion(P, CmpRHSRange);
    // Shift the LHS range by the nsw-added constant to get the def's range.
    auto NarrowDefRange = CmpConstrainedLHSRange.addWithNoWrap(
        *NarrowDefRHS, OverflowingBinaryOperator::NoSignedWrap);

    updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange);
  };

  // Scan backwards from Ctx within its block for guard intrinsics; a guard's
  // condition is known true at every point it dominates.
  auto UpdateRangeFromGuards = [&](Instruction *Ctx) {
    if (!HasGuards)
      return;

    for (Instruction &I : make_range(Ctx->getIterator().getReverse(),
                                     Ctx->getParent()->rend())) {
      Value *C = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C))))
        UpdateRangeFromCondition(C, /*TrueDest=*/true);
    }
  };

  UpdateRangeFromGuards(NarrowUser);

  BasicBlock *NarrowUserBB = NarrowUser->getParent();
  // If NarrowUserBB is statically unreachable asking dominator queries may
  // yield surprising results. (e.g. the block may not have a dom tree node)
  if (!DT->isReachableFromEntry(NarrowUserBB))
    return;

  // Walk up the dominator tree, staying inside the loop, harvesting guard and
  // branch conditions whose edge dominates NarrowUser.
  for (auto *DTB = (*DT)[NarrowUserBB]->getIDom();
       L->contains(DTB->getBlock());
       DTB = DTB->getIDom()) {
    auto *BB = DTB->getBlock();
    auto *TI = BB->getTerminator();
    UpdateRangeFromGuards(TI);

    auto *BI = dyn_cast<BranchInst>(TI);
    if (!BI || !BI->isConditional())
      continue;

    auto *TrueSuccessor = BI->getSuccessor(0);
    auto *FalseSuccessor = BI->getSuccessor(1);

    auto DominatesNarrowUser = [this, NarrowUser] (BasicBlockEdge BBE) {
      return BBE.isSingleEdge() &&
             DT->dominates(BBE, NarrowUser->getParent());
    };

    if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true);

    if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false);
  }
}
2066 
2067 /// Calculates PostIncRangeInfos map for the given IV
2068 void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) {
2069   SmallPtrSet<Instruction *, 16> Visited;
2070   SmallVector<Instruction *, 6> Worklist;
2071   Worklist.push_back(OrigPhi);
2072   Visited.insert(OrigPhi);
2073 
2074   while (!Worklist.empty()) {
2075     Instruction *NarrowDef = Worklist.pop_back_val();
2076 
2077     for (Use &U : NarrowDef->uses()) {
2078       auto *NarrowUser = cast<Instruction>(U.getUser());
2079 
2080       // Don't go looking outside the current loop.
2081       auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()];
2082       if (!NarrowUserLoop || !L->contains(NarrowUserLoop))
2083         continue;
2084 
2085       if (!Visited.insert(NarrowUser).second)
2086         continue;
2087 
2088       Worklist.push_back(NarrowUser);
2089 
2090       calculatePostIncRange(NarrowDef, NarrowUser);
2091     }
2092   }
2093 }
2094 
2095 PHINode *llvm::createWideIV(const WideIVInfo &WI,
2096     LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter,
2097     DominatorTree *DT, SmallVectorImpl<WeakTrackingVH> &DeadInsts,
2098     unsigned &NumElimExt, unsigned &NumWidened,
2099     bool HasGuards, bool UsePostIncrementRanges) {
2100   WidenIV Widener(WI, LI, SE, DT, DeadInsts, HasGuards, UsePostIncrementRanges);
2101   PHINode *WidePHI = Widener.createWideIV(Rewriter);
2102   NumElimExt = Widener.getNumElimExt();
2103   NumWidened = Widener.getNumWidened();
2104   return WidePHI;
2105 }
2106