//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
#else
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
#endif

using namespace llvm;

cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do so, "
             "this controls the budget that is considered cheap (default = 4)"));
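// Usage sketch (illustrative, not part of this file): like any cl::opt, the
// budget can be overridden on the opt command line, e.g.
//   opt -passes=loop-reduce -scev-cheap-expansion-budget=8 input.ll
// Callers consult it when deciding whether an expansion is "cheap" enough.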

using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (= dominating IP) exists, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Value *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users()) {
    if (U->getType() != Ty)
      continue;
    CastInst *CI = dyn_cast<CastInst>(U);
    if (!CI || CI->getOpcode() != Op)
      continue;

    // Found a suitable cast that is at IP or comes before IP. Use it. Note that
    // the cast must also properly dominate the Builder's insertion point.
    if (IP->getParent() == CI->getParent() && &*BIP != CI &&
        (&*IP == CI || CI->comesBefore(&*IP))) {
      Ret = CI;
      break;
    }
  }

  // Create a new cast.
  if (!Ret) {
    SCEVInsertPointGuard Guard(Builder, this);
    Builder.SetInsertPoint(&*IP);
    Ret = Builder.CreateCast(Op, V, Ty, V->getName());
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(!isa<Instruction>(Ret) ||
         SE.DT.dominates(cast<Instruction>(Ret), &*BIP));

  return Ret;
}

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I,
                                   Instruction *MustDominate) const {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getParent()->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  // Adjust insert point to be after instructions inserted by the expander, so
  // we can re-use already inserted instructions. Avoid skipping past the
  // original \p MustDominate, in case it is an inserted instruction.
  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
    ++IP;

  return IP;
}

BasicBlock::iterator
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return IP;
  }

  // Cast the instruction immediately after the instruction.
  if (Instruction *I = dyn_cast<Instruction>(V))
    return findInsertPointAfter(I, &*Builder.GetInsertPoint());

  // Otherwise, this must be some kind of constant,
  // so let's plop this cast into the function's entry block.
  assert(isa<Constant>(V) &&
         "Expected the cast argument to be a global/constant");
  return Builder.GetInsertBlock()
      ->getParent()
      ->getEntryBlock()
      .getFirstInsertionPt();
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // inttoptr only works for integral pointers. For non-integral pointers, we
  // can create a GEP on null with the integral value as index. Note that
  // it is safe to use GEP of null instead of inttoptr here, because only
  // expressions already based on a GEP of null should be converted to pointers
  // during expansion.
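  // Illustrative sketch (hedged; the names below are made up): converting an
  // i64 %off to a pointer in non-integral address space 1 yields
  //   %scevgep = getelementptr i8, ptr addrspace(1) null, i64 %off
  // instead of an inttoptr, which has no defined meaning for such pointers.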
  if (Op == Instruction::IntToPtr) {
    auto *PtrTy = cast<PointerType>(Ty);
    if (DL.isNonIntegralPointerType(PtrTy)) {
      auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
      assert(DL.getTypeAllocSize(Builder.getInt8Ty()) == 1 &&
             "alloc size of i8 must be 1 byte for the GEP to be correct");
      return Builder.CreateGEP(
          Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "scevgep");
    }
  }
  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Try to reuse existing cast, or insert one.
  return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
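///
/// For example (illustrative): requesting `add %a, %b` when an identical
/// `%t = add %a, %b` sits within the handful of instructions scanned before
/// the insertion point simply returns %t, and an add of two loop-invariant
/// values requested inside a loop is emitted in the loop preheader instead.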
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *Res = ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, DL))
        return Res;

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not reuse any instruction that has the exact
        // flag set.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  // TODO: Use the Builder, which will make CreateBinOp below fold with
  // InstSimplifyFolder.
  Instruction *BO = Builder.Insert(BinaryOperator::Create(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();

  return BO;
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
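/// Illustrative sketch of the difference, for a base pointer %p and an i64
/// offset %n (names made up):
///   %scevgep = getelementptr i8, ptr %p, i64 %n
/// rather than the opaque form
///   %pi = ptrtoint ptr %p to i64
///   %ai = add i64 %pi, %n
///   %q  = inttoptr i64 %ai to ptr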
Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Type *Ty, Value *V) {
  assert(!isa<Instruction>(V) ||
         SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

  Value *Idx = expandCodeForImpl(Offset, Ty);

  // Fold a GEP with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(V))
    if (Constant *CRHS = dyn_cast<Constant>(Idx))
      return Builder.CreateGEP(Builder.getInt8Ty(), CLHS, CRHS);

  // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == Instruction::GetElementPtr &&
          IP->getOperand(0) == V && IP->getOperand(1) == Idx &&
          cast<GEPOperator>(&*IP)->getSourceElementType() ==
              Type::getInt8Ty(Ty->getContext()))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // Emit a GEP.
  return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "scevgep");
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  switch (S->getSCEVType()) {
  case scConstant:
  case scVScale:
    return nullptr; // A constant has no relevant loops.
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scAddRecExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : S->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[S] = L;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the front.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

} // namespace

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
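  // Illustrative sketch (made-up names): expanding (%p + %n + 4) for a pointer
  // %p expands %p first, then folds the remaining operands into a single
  //   %scevgep = getelementptr i8, ptr %p, i64 (%n + 4)
  // via the pointer case below.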
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
      continue;
    }

    assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
    if (isa<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(SE.getAddExpr(NewOps), Ty, Sum);
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeForImpl(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
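  // Worked example (illustrative): for N = 13 = 8 + 4 + 1, the lambda below
  // computes X, X^2, X^4 and X^8 by repeated squaring and multiplies the
  // powers selected by the set bits of N, i.e. X pow 13 = X * (X pow 4) *
  // (X pow 8).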
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // in this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 so that the doubling of BinExp in
    // the loop below can never overflow before it exceeds Exponent.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8, etc., and multiply the ones
    // selected by Exponent's binary representation into the result.
    Value *P = expandCodeForImpl(I->second, Ty);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear the nsw flag if the shl would produce a poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeForImpl(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }
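  // Illustrative: with a constant divisor of 8, the power-of-two case above
  // emits `lshr %lhs, 3` instead of a udiv.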

  Value *RHS = expandCodeForImpl(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (Use &Op : llvm::drop_begin(IncV->operands()))
      if (Instruction *OInst = dyn_cast<Instruction>(Op))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return nullptr.
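///
/// For example (illustrative): given `%iv.next = add %iv, %step`, this returns
/// `%iv` when `%step` dominates InsertPos; for `%p.next = getelementptr i8,
/// ptr %p, i64 %step` it likewise returns `%p`.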
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop-invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Use &U : llvm::drop_begin(IncV->operands())) {
      if (isa<Constant>(U))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(U)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // GEPs produced by SCEVExpander use the i8 element type.
      if (!cast<GEPOperator>(IncV)->getSourceElementType()->isIntegerTy(8))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'.  This is intended to be used when the instruction
/// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos,
                              bool RecomputePoisonFlags) {
  auto FixupPoisonFlags = [this](Instruction *I) {
    // Drop flags that are potentially inferred from the old context and infer
    // flags in the new context.
    I->dropPoisonGeneratingFlags();
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
      if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
        auto *BO = cast<BinaryOperator>(I);
        BO->setHasNoUnsignedWrap(
            ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) == SCEV::FlagNUW);
        BO->setHasNoSignedWrap(
            ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) == SCEV::FlagNSW);
      }
  };

  if (SE.DT.dominates(IncV, InsertPos)) {
    if (RecomputePoisonFlags)
      FixupPoisonFlags(IncV);
    return true;
  }

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (Instruction *I : llvm::reverse(IVIncs)) {
    fixupInsertPoints(I);
    I->moveBefore(InsertPos);
    if (RecomputePoisonFlags)
      FixupPoisonFlags(I);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
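///
/// Illustrative sketch: for a pointer PHI %p.iv with step %s this emits
/// `getelementptr i8, ptr %p.iv, i64 %s`; for an integer PHI %x.iv it emits
/// `%x.iv.next = add %x.iv, %s` (or a sub when useSubtract is set).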
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    IncV = expandAddToGEP(SE.getSCEV(StepV), IntTy, PN);
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
  }
  return IncV;
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  // We can't transform to match a pointer PHI.
  if (Phi->getType()->isPointerTy())
    return false;

  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
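  // For example (illustrative): an available phi {0,+,-1} can serve a request
  // for {5,+,1}, since 5 - {5,+,1} == {0,+,-1}; the caller then materializes
  // the requested value as `5 - phi`.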
  if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

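// Worked example for the two checks above (illustrative): for an i8 addrec
// {127,+,1}, sext(127 + 1) is sext(-128) = -128 in i16, while sext(127) +
// sext(1) = 128, so the increment is not NSW. The increment is proven NSW/NUW
// exactly when the extend-of-add and add-of-extends forms agree in the
// doubled-width type.
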
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop || IVIncInsertPos) &&
         "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching SCEVs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      // We should not look for an incomplete PHI. Getting the SCEV for an
      // incomplete PHI has no meaning at all.
      if (!PN.isComplete()) {
        SCEV_DEBUG_WITH_TYPE(
            DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
        continue;
      }

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this can possibly be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      // Those values were not actually inserted but re-used.
      ReusedValues.insert(AddRecPhiMatch);
      ReusedValues.insert(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV =
      expandCodeForImpl(Normalized->getStart(), ExpandTy,
                        L->getLoopPreheader()->getTerminator());

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeForImpl(
      Step, IntTy, &*L->getHeader()->getFirstInsertionPt());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition.  It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
  // effective when we are able to use an IV inserted here, so record it.
  InsertedValues.insert(PN);
  InsertedIVs.push_back(PN);
  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(
        normalizeForPostIncUse(S, Loops, SE, /*CheckInvertible=*/false));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't, re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // We might be introducing a new use of the post-inc IV that is not poison
    // safe, in which case we should drop poison-generating flags. Only keep
    // those flags for which SCEV has proven that they always hold.
    if (isa<OverflowingBinaryOperator>(Result)) {
      auto *I = cast<Instruction>(Result);
      if (!S->hasNoUnsignedWrap())
        I->setHasNoUnsignedWrap(false);
      if (!S->hasNoSignedWrap())
        I->setHasNoSignedWrap(false);
    }

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeForImpl(
            Step, IntTy, &*L->getHeader()->getFirstInsertionPt());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType())
      Result = Builder.CreateTrunc(Result, TruncTy);

    // Invert the result.
    if (InvertStep)
      Result = Builder.CreateSub(
          expandCodeForImpl(Normalized->getStart(), TruncTy), Result);
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeForImpl(PostLoopScale, IntTy));
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (isa<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy);
        Result = expandAddToGEP(SE.getUnknown(Result), IntTy, Base);
      } else {
        Result = expandAddToGEP(PostLoopOffset, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(
          Result, expandCodeForImpl(PostLoopOffset, IntTy));
    }
  }

  return Result;
}
1227 
1228 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1229   // In canonical mode we compute the addrec as an expression of a canonical IV
1230   // using evaluateAtIteration and expand the resulting SCEV expression. This
1231   // way we avoid introducing new IVs to carry on the computation of the addrec
1232   // throughout the loop.
1233   //
1234   // For nested addrecs evaluateAtIteration might need a canonical IV of a
1235   // type wider than the addrec itself. Emitting a canonical IV of the
1236   // proper type might produce non-legal types, for example expanding an i64
1237   // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1238   // back to non-canonical mode for nested addrecs.
1239   if (!CanonicalMode || (S->getNumOperands() > 2))
1240     return expandAddRecExprLiterally(S);
1241 
1242   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1243   const Loop *L = S->getLoop();
1244 
1245   // First check for an existing canonical IV in a suitable type.
1246   PHINode *CanonicalIV = nullptr;
1247   if (PHINode *PN = L->getCanonicalInductionVariable())
1248     if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1249       CanonicalIV = PN;
1250 
1251   // Rewrite an AddRec in terms of the canonical induction variable, if
1252   // its type is more narrow.
1253   if (CanonicalIV &&
1254       SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
1255       !S->getType()->isPointerTy()) {
1256     SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1257     for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1258       NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
1259     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1260                                        S->getNoWrapFlags(SCEV::FlagNW)));
1261     BasicBlock::iterator NewInsertPt =
1262         findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
1263     V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1264                           &*NewInsertPt);
1265     return V;
1266   }
1267 
1268   // {X,+,F} --> X + {0,+,F}
1269   if (!S->getStart()->isZero()) {
1270     if (isa<PointerType>(S->getType())) {
1271       Value *StartV = expand(SE.getPointerBase(S));
1272       return expandAddToGEP(SE.removePointerBase(S), Ty, StartV);
1273     }
1274 
1275     SmallVector<const SCEV *, 4> NewOps(S->operands());
1276     NewOps[0] = SE.getConstant(Ty, 0);
1277     const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1278                                         S->getNoWrapFlags(SCEV::FlagNW));
1279 
1280     // Just do a normal add. Pre-expand the operands to suppress folding.
1281     //
1282     // The LHS and RHS values are factored out of the expand call to make the
1283     // output independent of the argument evaluation order.
1284     const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1285     const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1286     return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1287   }
1288 
1289   // If we don't yet have a canonical IV, create one.
1290   if (!CanonicalIV) {
1291     // Create and insert the PHI node for the induction variable in the
1292     // specified loop.
1293     BasicBlock *Header = L->getHeader();
1294     pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1295     CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1296                                   &Header->front());
1297     rememberInstruction(CanonicalIV);
1298 
1299     SmallSet<BasicBlock *, 4> PredSeen;
1300     Constant *One = ConstantInt::get(Ty, 1);
1301     for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1302       BasicBlock *HP = *HPI;
1303       if (!PredSeen.insert(HP).second) {
1304         // There must be an incoming value for each predecessor, even the
1305         // duplicates!
1306         CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1307         continue;
1308       }
1309 
1310       if (L->contains(HP)) {
1311         // Insert a unit add instruction right before the terminator
1312         // corresponding to the back-edge.
1313         Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1314                                                      "indvar.next",
1315                                                      HP->getTerminator());
1316         Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1317         rememberInstruction(Add);
1318         CanonicalIV->addIncoming(Add, HP);
1319       } else {
1320         CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1321       }
1322     }
1323   }
1324 
1325   // {0,+,1} --> Insert a canonical induction variable into the loop!
1326   if (S->isAffine() && S->getOperand(1)->isOne()) {
1327     assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1328            "IVs with types different from the canonical IV should "
1329            "already have been handled!");
1330     return CanonicalIV;
1331   }
1332 
1333   // {0,+,F} --> {0,+,1} * F
1334 
1335   // If this is a simple linear addrec, emit it now as a special case.
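  // For example (illustrative): {0,+,4}<%L> becomes 'mul iN %indvar, 4',
  // truncated to the requested type if the canonical IV is wider.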
1336   if (S->isAffine()) // {0,+,F} --> i*F
1337     return expand(SE.getTruncateOrNoop(
1338         SE.getMulExpr(SE.getUnknown(CanonicalIV),
1339                       SE.getNoopOrAnyExtend(S->getOperand(1),
1340                                             CanonicalIV->getType())),
1341         Ty));
1343 
1344   // If this is a chain of recurrences, turn it into a closed form, using the
1345   // folders, then expandCodeFor the closed form.  This allows the folders to
1346   // simplify the expression without having to build a bunch of special code
1347   // into this folder.
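  // For example (illustrative): with a symbolic iteration number i, the
  // quadratic {0,+,A,+,B} evaluates to A*i + B*(i*(i-1)/2), i.e. the operands
  // scaled by binomial coefficients, which the folders can simplify further
  // before the closed form is expanded.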
1348   const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.
1349 
1350   // Promote S up to the canonical IV type, if the cast is foldable.
1351   const SCEV *NewS = S;
1352   const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1353   if (isa<SCEVAddRecExpr>(Ext))
1354     NewS = Ext;
1355 
1356   const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1357 
1358   // Truncate the result down to the original type, if needed.
1359   const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1360   return expand(T);
1361 }
1362 
1363 Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
1364   Value *V =
1365       expandCodeForImpl(S->getOperand(), S->getOperand()->getType());
1366   return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1367                            GetOptimalInsertionPointForCastOf(V));
1368 }
1369 
1370 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1371   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1372   Value *V = expandCodeForImpl(
1373       S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()));
1375   return Builder.CreateTrunc(V, Ty);
1376 }
1377 
1378 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1379   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1380   Value *V = expandCodeForImpl(
1381       S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()));
1383   return Builder.CreateZExt(V, Ty);
1384 }
1385 
1386 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1387   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1388   Value *V = expandCodeForImpl(
1389       S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()));
1391   return Builder.CreateSExt(V, Ty);
1392 }
1393 
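// Expand a min/max expression as a right-to-left chain of min/max intrinsic
// calls (or icmp+select for non-integer types). For example (illustrative),
// umin(a, b, c) becomes umin(umin(c, b), a); for the sequential form, every
// operand except the first is frozen beforehand so that poison in a later
// operand cannot propagate through the short-circuit semantics.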
1394 Value *SCEVExpander::expandMinMaxExpr(const SCEVNAryExpr *S,
1395                                       Intrinsic::ID IntrinID, Twine Name,
1396                                       bool IsSequential) {
1397   Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1398   Type *Ty = LHS->getType();
1399   if (IsSequential)
1400     LHS = Builder.CreateFreeze(LHS);
1401   for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1402     Value *RHS = expandCodeForImpl(S->getOperand(i), Ty);
1403     if (IsSequential && i != 0)
1404       RHS = Builder.CreateFreeze(RHS);
1405     Value *Sel;
1406     if (Ty->isIntegerTy())
1407       Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
1408                                     /*FMFSource=*/nullptr, Name);
1409     else {
1410       Value *ICmp =
1411           Builder.CreateICmp(MinMaxIntrinsic::getPredicate(IntrinID), LHS, RHS);
1412       Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
1413     }
1414     LHS = Sel;
1415   }
1416   return LHS;
1417 }
1418 
1419 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1420   return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1421 }
1422 
1423 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1424   return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1425 }
1426 
1427 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1428   return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1429 }
1430 
1431 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1432   return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1433 }
1434 
1435 Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
1436   return expandMinMaxExpr(S, Intrinsic::umin, "umin", /*IsSequential*/true);
1437 }
1438 
1439 Value *SCEVExpander::visitVScale(const SCEVVScale *S) {
1440   return Builder.CreateVScale(ConstantInt::get(S->getType(), 1));
1441 }
1442 
1443 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
1444                                        Instruction *IP) {
1445   setInsertPoint(IP);
1446   Value *V = expandCodeForImpl(SH, Ty);
1447   return V;
1448 }
1449 
1450 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty) {
1451   // Expand the code for this SCEV.
1452   Value *V = expand(SH);
1453 
1454   if (Ty) {
1455     assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1456            "non-trivial casts should be done with the SCEVs directly!");
1457     V = InsertNoopCastOfTo(V, Ty);
1458   }
1459   return V;
1460 }
1461 
1462 Value *SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1463                                              const Instruction *InsertPt) {
1464   // If the expansion is not in CanonicalMode, and the SCEV contains any
1465   // scAddRecExpr subexpression, the SCEV must be expanded literally.
1466   if (!CanonicalMode && SE.containsAddRecurrence(S))
1467     return nullptr;
1468 
1469   // If S is a constant, it may be worse to reuse an existing Value.
1470   if (isa<SCEVConstant>(S))
1471     return nullptr;
1472 
1473   // Choose a Value from the set which dominates the InsertPt.
1474   // InsertPt should be inside the Value's parent loop so as not to break
1475   // the LCSSA form.
1476   for (Value *V : SE.getSCEVValues(S)) {
1477     Instruction *EntInst = dyn_cast<Instruction>(V);
1478     if (!EntInst)
1479       continue;
1480 
1481     assert(EntInst->getFunction() == InsertPt->getFunction());
1482     if (S->getType() == V->getType() &&
1483         SE.DT.dominates(EntInst, InsertPt) &&
1484         (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1485          SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1486       return V;
1487   }
1488   return nullptr;
1489 }
1490 
1491 // The expansion of a SCEV will either reuse a previous Value in ExprValueMap
1492 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode
1493 // and the SCEV contains any scAddRecExpr subexpression, it will be expanded
1494 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1495 // the expansion will try to reuse a Value from ExprValueMap, and only when
1496 // that fails will it expand the SCEV literally.
1497 Value *SCEVExpander::expand(const SCEV *S) {
1498   // Compute an insertion point for this SCEV object. Hoist the instructions
1499   // as far out in the loop nest as possible.
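  // For example (illustrative): if the SCEV being expanded is invariant in
  // the loop containing the insertion point, the code for it is emitted in
  // that loop's preheader (or further out) rather than next to the user.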
1500   Instruction *InsertPt = &*Builder.GetInsertPoint();
1501 
1502   // We can move the insertion point only if there are no div or rem operations;
1503   // otherwise we risk moving it past the check for a zero denominator.
1504   auto SafeToHoist = [](const SCEV *S) {
1505     return !SCEVExprContains(S, [](const SCEV *S) {
1506               if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1507                 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1508                   // Division by non-zero constants can be hoisted.
1509                   return SC->getValue()->isZero();
1510                 // All other divisions should not be moved as they may be
1511                 // divisions by zero and should be kept within the
1512                 // conditions of the surrounding loops that guard their
1513                 // execution (see PR35406).
1514                 return true;
1515               }
1516               return false;
1517             });
1518   };
1519   if (SafeToHoist(S)) {
1520     for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1521          L = L->getParentLoop()) {
1522       if (SE.isLoopInvariant(S, L)) {
1523         if (!L) break;
1524         if (BasicBlock *Preheader = L->getLoopPreheader())
1525           InsertPt = Preheader->getTerminator();
1526         else
1527           // LSR sets the insertion point for AddRec start/step values to the
1528           // block start to simplify value reuse, even though it's an invalid
1529           // position. SCEVExpander must correct for this in all cases.
1530           InsertPt = &*L->getHeader()->getFirstInsertionPt();
1531       } else {
1532         // If the SCEV is computable at this level, insert it into the header
1533         // after the PHIs (and after any other instructions that we've inserted
1534         // there) so that it is guaranteed to dominate any user inside the loop.
1535         if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1536           InsertPt = &*L->getHeader()->getFirstInsertionPt();
1537 
1538         while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1539                (isInsertedInstruction(InsertPt) ||
1540                 isa<DbgInfoIntrinsic>(InsertPt))) {
1541           InsertPt = &*std::next(InsertPt->getIterator());
1542         }
1543         break;
1544       }
1545     }
1546   }
1547 
1548   // Check to see if we already expanded this here.
1549   auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1550   if (I != InsertedExpressions.end())
1551     return I->second;
1552 
1553   SCEVInsertPointGuard Guard(Builder, this);
1554   Builder.SetInsertPoint(InsertPt);
1555 
1556   // Expand the expression into instructions.
1557   Value *V = FindValueInExprValueMap(S, InsertPt);
1558   if (!V) {
1559     V = visit(S);
1560     V = fixupLCSSAFormFor(V);
1561   } else {
1562     // If we're reusing an existing instruction, we are effectively CSEing two
1563     // copies of the instruction (with potentially different flags).  As such,
1564     // we need to drop any poison generating flags unless we can prove that
1565     // said flags must be valid for all new users.
1566     if (auto *I = dyn_cast<Instruction>(V))
1567       if (I->hasPoisonGeneratingFlags() && !programUndefinedIfPoison(I))
1568         I->dropPoisonGeneratingFlags();
1569   }
1570   // Remember the expanded value for this SCEV at this location.
1571   //
1572   // This is independent of PostIncLoops. The mapped value simply materializes
1573   // the expression at this insertion point. If the mapped value happened to be
1574   // a postinc expansion, it could be reused by a non-postinc user, but only if
1575   // its insertion point was already at the head of the loop.
1576   InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1577   return V;
1578 }
1579 
1580 void SCEVExpander::rememberInstruction(Value *I) {
1581   auto DoInsert = [this](Value *V) {
1582     if (!PostIncLoops.empty())
1583       InsertedPostIncValues.insert(V);
1584     else
1585       InsertedValues.insert(V);
1586   };
1587   DoInsert(I);
1588 }
1589 
1590 /// replaceCongruentIVs - Check for congruent phis in this loop header and
1591 /// replace them with their most canonical representative. Return the number of
1592 /// phis eliminated.
1593 ///
1594 /// This does not depend on any SCEVExpander state but should be used in
1595 /// the same context that SCEVExpander is used.
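///
/// For example (illustrative): an i32 phi {0,+,1} and an i64 phi {0,+,1} in
/// the same loop header are congruent up to truncation; the narrower phi and
/// its increment get replaced by truncations of the wider ones.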
1596 unsigned
1597 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1598                                   SmallVectorImpl<WeakTrackingVH> &DeadInsts,
1599                                   const TargetTransformInfo *TTI) {
1600   // Find integer phis in order of decreasing width.
1601   SmallVector<PHINode*, 8> Phis;
1602   for (PHINode &PN : L->getHeader()->phis())
1603     Phis.push_back(&PN);
1604 
1605   if (TTI)
1606     // Use stable_sort to preserve order of equivalent PHIs, so the order
1607     // of the sorted Phis is the same from run to run on the same loop.
1608     llvm::stable_sort(Phis, [](Value *LHS, Value *RHS) {
1609       // Put pointers at the back and make sure pointer < pointer = false.
1610       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1611         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1612       return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1613              LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1614     });
1615 
1616   unsigned NumElim = 0;
1617   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1618   // Process phis from wide to narrow. Map wide phis to their truncation
1619   // so narrow phis can reuse them.
1620   for (PHINode *Phi : Phis) {
1621     auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1622       if (Value *V = simplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1623         return V;
1624       if (!SE.isSCEVable(PN->getType()))
1625         return nullptr;
1626       auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1627       if (!Const)
1628         return nullptr;
1629       return Const->getValue();
1630     };
1631 
1632     // Fold constant phis. They may be congruent to other constant phis and
1633     // would confuse the logic below that expects proper IVs.
1634     if (Value *V = SimplifyPHINode(Phi)) {
1635       if (V->getType() != Phi->getType())
1636         continue;
1637       SE.forgetValue(Phi);
1638       Phi->replaceAllUsesWith(V);
1639       DeadInsts.emplace_back(Phi);
1640       ++NumElim;
1641       SCEV_DEBUG_WITH_TYPE(DebugType,
1642                            dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1643                                   << '\n');
1644       continue;
1645     }
1646 
1647     if (!SE.isSCEVable(Phi->getType()))
1648       continue;
1649 
1650     PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1651     if (!OrigPhiRef) {
1652       OrigPhiRef = Phi;
1653       if (Phi->getType()->isIntegerTy() && TTI &&
1654           TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1655         // Make sure we only rewrite using simple induction variables;
1656         // otherwise, we can make the trip count of a loop unanalyzable
1657         // to SCEV.
1658         const SCEV *PhiExpr = SE.getSCEV(Phi);
1659         if (isa<SCEVAddRecExpr>(PhiExpr)) {
1660           // This phi can be freely truncated to the narrowest phi type. Map the
1661           // truncated expression to it so it will be reused for narrow types.
1662           const SCEV *TruncExpr =
1663               SE.getTruncateExpr(PhiExpr, Phis.back()->getType());
1664           ExprToIVMap[TruncExpr] = Phi;
1665         }
1666       }
1667       continue;
1668     }
1669 
1670     // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1671     // sense.
1672     if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1673       continue;
1674 
1675     if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1676       Instruction *OrigInc = dyn_cast<Instruction>(
1677           OrigPhiRef->getIncomingValueForBlock(LatchBlock));
1678       Instruction *IsomorphicInc =
1679           dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1680 
1681       if (OrigInc && IsomorphicInc) {
1682         // If this phi has the same width but is more canonical, replace the
1683         // original with it. As part of the "more canonical" determination,
1684         // respect a prior decision to use an IV chain.
1685         if (OrigPhiRef->getType() == Phi->getType() &&
1686             !(ChainedPhis.count(Phi) ||
1687               isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1688             (ChainedPhis.count(Phi) ||
1689              isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1690           std::swap(OrigPhiRef, Phi);
1691           std::swap(OrigInc, IsomorphicInc);
1692         }
1693         // Replacing the congruent phi is sufficient because acyclic
1694         // redundancy elimination, CSE/GVN, should handle the
1695         // rest. However, once SCEV proves that a phi is congruent,
1696         // it's often the head of an IV user cycle that is isomorphic
1697         // with the original phi. It's worth eagerly cleaning up the
1698         // common case of a single IV increment so that DeleteDeadPHIs
1699         // can remove cycles that had postinc uses.
1700         // Because we may introduce a new use of OrigIV that didn't exist
1701         // before at this point, its poison flags need readjustment.
1702         const SCEV *TruncExpr =
1703             SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1704         if (OrigInc != IsomorphicInc &&
1705             TruncExpr == SE.getSCEV(IsomorphicInc) &&
1706             SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
1707             hoistIVInc(OrigInc, IsomorphicInc, /*RecomputePoisonFlags*/ true)) {
1708           SCEV_DEBUG_WITH_TYPE(
1709               DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1710                                 << *IsomorphicInc << '\n');
1711           Value *NewInc = OrigInc;
1712           if (OrigInc->getType() != IsomorphicInc->getType()) {
1713             Instruction *IP = nullptr;
1714             if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1715               IP = &*PN->getParent()->getFirstInsertionPt();
1716             else
1717               IP = OrigInc->getNextNode();
1718 
1719             IRBuilder<> Builder(IP);
1720             Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1721             NewInc = Builder.CreateTruncOrBitCast(
1722                 OrigInc, IsomorphicInc->getType(), IVName);
1723           }
1724           IsomorphicInc->replaceAllUsesWith(NewInc);
1725           DeadInsts.emplace_back(IsomorphicInc);
1726         }
1727       }
1728     }
1729     SCEV_DEBUG_WITH_TYPE(DebugType,
1730                          dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
1731                                 << '\n');
1732     SCEV_DEBUG_WITH_TYPE(
1733         DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
1734     ++NumElim;
1735     Value *NewIV = OrigPhiRef;
1736     if (OrigPhiRef->getType() != Phi->getType()) {
1737       IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
1738       Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1739       NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1740     }
1741     Phi->replaceAllUsesWith(NewIV);
1742     DeadInsts.emplace_back(Phi);
1743   }
1744   return NumElim;
1745 }
1746 
1747 Value *SCEVExpander::getRelatedExistingExpansion(const SCEV *S,
1748                                                  const Instruction *At,
1749                                                  Loop *L) {
1750   using namespace llvm::PatternMatch;
1751 
1752   SmallVector<BasicBlock *, 4> ExitingBlocks;
1753   L->getExitingBlocks(ExitingBlocks);
1754 
1755   // Look for a suitable value in simple conditions at the loop exits.
1756   for (BasicBlock *BB : ExitingBlocks) {
1757     ICmpInst::Predicate Pred;
1758     Instruction *LHS, *RHS;
1759 
1760     if (!match(BB->getTerminator(),
1761                m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
1762                     m_BasicBlock(), m_BasicBlock())))
1763       continue;
1764 
1765     if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1766       return LHS;
1767 
1768     if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1769       return RHS;
1770   }
1771 
1772   // Use expand's logic which is used for reusing a previous Value in
1773   // ExprValueMap.  Note that we don't currently model the cost of
1774   // needing to drop poison generating flags on the instruction if we
1775   // want to reuse it.  We effectively assume that has zero cost.
1776   return FindValueInExprValueMap(S, At);
1777 }
1778 
1779 template<typename T> static InstructionCost costAndCollectOperands(
1780   const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
1781   TargetTransformInfo::TargetCostKind CostKind,
1782   SmallVectorImpl<SCEVOperand> &Worklist) {
1783 
1784   const T *S = cast<T>(WorkItem.S);
1785   InstructionCost Cost = 0;
1786   // Object to help map SCEV operands to expanded IR instructions.
1787   struct OperationIndices {
1788     OperationIndices(unsigned Opc, size_t min, size_t max) :
1789       Opcode(Opc), MinIdx(min), MaxIdx(max) { }
1790     unsigned Opcode;
1791     size_t MinIdx;
1792     size_t MaxIdx;
1793   };
1794 
1795   // Collect the operations of all the instructions that will be needed to
1796   // expand the SCEVExpr. This is so that when we come to cost the operands,
1797   // we know what the generated user(s) will be.
1798   SmallVector<OperationIndices, 2> Operations;
1799 
1800   auto CastCost = [&](unsigned Opcode) -> InstructionCost {
1801     Operations.emplace_back(Opcode, 0, 0);
1802     return TTI.getCastInstrCost(Opcode, S->getType(),
1803                                 S->getOperand(0)->getType(),
1804                                 TTI::CastContextHint::None, CostKind);
1805   };
1806 
1807   auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
1808                        unsigned MinIdx = 0,
1809                        unsigned MaxIdx = 1) -> InstructionCost {
1810     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1811     return NumRequired *
1812       TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
1813   };
1814 
1815   auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
1816                         unsigned MaxIdx) -> InstructionCost {
1817     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1818     Type *OpType = S->getType();
1819     return NumRequired * TTI.getCmpSelInstrCost(
1820                              Opcode, OpType, CmpInst::makeCmpResultType(OpType),
1821                              CmpInst::BAD_ICMP_PREDICATE, CostKind);
1822   };
1823 
1824   switch (S->getSCEVType()) {
1825   case scCouldNotCompute:
1826     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1827   case scUnknown:
1828   case scConstant:
1829   case scVScale:
1830     return 0;
1831   case scPtrToInt:
1832     Cost = CastCost(Instruction::PtrToInt);
1833     break;
1834   case scTruncate:
1835     Cost = CastCost(Instruction::Trunc);
1836     break;
1837   case scZeroExtend:
1838     Cost = CastCost(Instruction::ZExt);
1839     break;
1840   case scSignExtend:
1841     Cost = CastCost(Instruction::SExt);
1842     break;
1843   case scUDivExpr: {
1844     unsigned Opcode = Instruction::UDiv;
1845     if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
1846       if (SC->getAPInt().isPowerOf2())
1847         Opcode = Instruction::LShr;
1848     Cost = ArithCost(Opcode, 1);
1849     break;
1850   }
1851   case scAddExpr:
1852     Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
1853     break;
1854   case scMulExpr:
1855     // TODO: this is a very pessimistic cost model for Mul, because of the
1856     // binary exponentiation (Bin Pow) algorithm actually used by the expander;
1857     // see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
1858     Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
1859     break;
1860   case scSMaxExpr:
1861   case scUMaxExpr:
1862   case scSMinExpr:
1863   case scUMinExpr:
1864   case scSequentialUMinExpr: {
1865     // FIXME: should this ask the cost for intrinsics?
1866     // The reduction tree.
1867     Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
1868     Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
1869     switch (S->getSCEVType()) {
1870     case scSequentialUMinExpr: {
1871       // The safety net against poison.
1872       // FIXME: this is broken.
1873       Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
1874       Cost += ArithCost(Instruction::Or,
1875                         S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
1876       Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
1877       break;
1878     }
1879     default:
1880       assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
1881              "Unhandled SCEV expression type?");
1882       break;
1883     }
1884     break;
1885   }
1886   case scAddRecExpr: {
1887     // In this polynomial, we may have some zero operands, and we shouldn't
1888     // really charge for those. So how many non-zero coefficients are there?
1889     int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
1890                                     return !Op->isZero();
1891                                   });
1892 
1893     assert(NumTerms >= 1 && "Polynomial should have at least one term.");
1894     assert(!(*std::prev(S->operands().end()))->isZero() &&
1895            "Last operand should not be zero");
1896 
1897     // Ignoring the constant term (operand 0), how many coefficients are u> 1?
1898     int NumNonZeroDegreeNonOneTerms =
1899       llvm::count_if(S->operands(), [](const SCEV *Op) {
1900                       auto *SConst = dyn_cast<SCEVConstant>(Op);
1901                       return !SConst || SConst->getAPInt().ugt(1);
1902                     });
1903 
1904     // Much like with a normal add expr, the polynomial will require
1905     // one less addition than the number of its terms.
1906     InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
1907                                         /*MinIdx*/ 1, /*MaxIdx*/ 1);
1908     // Here, *each* one of those will require a multiplication.
1909     InstructionCost MulCost =
1910         ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
1911     Cost = AddCost + MulCost;
1912 
1913     // What is the degree of this polynomial?
1914     int PolyDegree = S->getNumOperands() - 1;
1915     assert(PolyDegree >= 1 && "Should be at least affine.");
1916 
1917     // The final term will be:
1918     //   Op_{PolyDegree} * x ^ {PolyDegree}
1919     // Where  x ^ {PolyDegree}  will again require PolyDegree-1 mul operations.
1920     // Note that  x ^ {PolyDegree} = x * x ^ {PolyDegree-1}  so charging for
1921     // x ^ {PolyDegree}  will give us  x ^ {2} .. x ^ {PolyDegree-1}  for free.
1922     // FIXME: this is conservatively correct, but might be overly pessimistic.
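    // Worked example (illustrative): for {6,+,4,+,2} all three operands are
    // constants u> 1, so AddCost above covers 2 adds and MulCost covers
    // 3 muls; PolyDegree == 2 then charges MulCost once more for x^2,
    // giving 2 adds + 6 muls in total.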
1923     Cost += MulCost * (PolyDegree - 1);
1924     break;
1925   }
1926   }
1927 
1928   for (auto &CostOp : Operations) {
1929     for (auto SCEVOp : enumerate(S->operands())) {
1930       // Clamp the index to account for multiple IR operations being chained.
1931       size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
1932       size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
1933       Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
1934     }
1935   }
1936   return Cost;
1937 }
1938 
1939 bool SCEVExpander::isHighCostExpansionHelper(
1940     const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
1941     InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
1942     SmallPtrSetImpl<const SCEV *> &Processed,
1943     SmallVectorImpl<SCEVOperand> &Worklist) {
1944   if (Cost > Budget)
1945     return true; // Already run out of budget, give up.
1946 
1947   const SCEV *S = WorkItem.S;
1948   // Was the cost of expansion of this expression already accounted for?
1949   if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
1950     return false; // We have already accounted for this expression.
1951 
1952   // If we can find an existing value for this scev available at the point "At"
1953   // then consider the expression cheap.
1954   if (getRelatedExistingExpansion(S, &At, L))
1955     return false; // Consider the expression to be free.
1956 
1957   TargetTransformInfo::TargetCostKind CostKind =
1958       L->getHeader()->getParent()->hasMinSize()
1959           ? TargetTransformInfo::TCK_CodeSize
1960           : TargetTransformInfo::TCK_RecipThroughput;
1961 
1962   switch (S->getSCEVType()) {
1963   case scCouldNotCompute:
1964     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1965   case scUnknown:
1966   case scVScale:
1967     // Assume to be zero-cost.
1968     return false;
1969   case scConstant: {
1970     // Only evaluate the costs of constants when optimizing for size.
1971     if (CostKind != TargetTransformInfo::TCK_CodeSize)
1972       return false;
1973     const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
1974     Type *Ty = S->getType();
1975     Cost += TTI.getIntImmCostInst(
1976         WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
1977     return Cost > Budget;
1978   }
1979   case scTruncate:
1980   case scPtrToInt:
1981   case scZeroExtend:
1982   case scSignExtend: {
1983     Cost +=
1984         costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
1985     return false; // Will answer upon next entry into this function.
1986   }
1987   case scUDivExpr: {
1988     // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
1989     // HowManyLessThans produced to compute a precise expression, rather than a
1990     // UDiv from the user's code. If we can't find a UDiv in the code with some
1991     // simple searching, we need to account for its cost.
1992 
1993     // At the beginning of this function we already tried to find existing
1994     // value for plain 'S'. Now try to look up 'S + 1', since it is a common
1995     // pattern involving division. This is just a simple search heuristic.
1996     if (getRelatedExistingExpansion(
1997             SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
1998       return false; // Consider it to be free.
1999 
2000     Cost +=
2001         costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2002     return false; // Will answer upon next entry into this function.
2003   }
2004   case scAddExpr:
2005   case scMulExpr:
2006   case scUMaxExpr:
2007   case scSMaxExpr:
2008   case scUMinExpr:
2009   case scSMinExpr:
2010   case scSequentialUMinExpr: {
2011     assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2012            "Nary expr should have more than 1 operand.");
2013     // The simple nary expr will require one less op (or pair of ops)
2014     // than the number of its terms.
2015     Cost +=
2016         costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2017     return Cost > Budget;
2018   }
2019   case scAddRecExpr: {
2020     assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2021            "Polynomial should be at least linear");
2022     Cost += costAndCollectOperands<SCEVAddRecExpr>(
2023         WorkItem, TTI, CostKind, Worklist);
2024     return Cost > Budget;
2025   }
2026   }
2027   llvm_unreachable("Unknown SCEV kind!");
2028 }
2029 
2030 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2031                                             Instruction *IP) {
2032   assert(IP);
2033   switch (Pred->getKind()) {
2034   case SCEVPredicate::P_Union:
2035     return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2036   case SCEVPredicate::P_Compare:
2037     return expandComparePredicate(cast<SCEVComparePredicate>(Pred), IP);
2038   case SCEVPredicate::P_Wrap: {
2039     auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2040     return expandWrapPredicate(AddRecPred, IP);
2041   }
2042   }
2043   llvm_unreachable("Unknown SCEV predicate type");
2044 }
2045 
2046 Value *SCEVExpander::expandComparePredicate(const SCEVComparePredicate *Pred,
2047                                             Instruction *IP) {
2048   Value *Expr0 =
2049       expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP);
2050   Value *Expr1 =
2051       expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP);
2052 
2053   Builder.SetInsertPoint(IP);
2054   auto InvPred = ICmpInst::getInversePredicate(Pred->getPredicate());
2055   auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2056   return I;
2057 }
2058 
2059 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2060                                            Instruction *Loc, bool Signed) {
2061   assert(AR->isAffine() && "Cannot generate RT check for "
2062                            "non-affine expression");
2063 
2064   // FIXME: It is highly suspicious that we're ignoring the predicates here.
2065   SmallVector<const SCEVPredicate *, 4> Pred;
2066   const SCEV *ExitCount =
2067       SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2068 
2069   assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2070 
2071   const SCEV *Step = AR->getStepRecurrence(SE);
2072   const SCEV *Start = AR->getStart();
2073 
2074   Type *ARTy = AR->getType();
2075   unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2076   unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2077 
2078   // The expression {Start,+,Step} has nusw/nssw if
2079   //   Step < 0, Start - |Step| * Backedge <= Start
2080   //   Step >= 0, Start + |Step| * Backedge > Start
2081   // and |Step| * Backedge doesn't overflow when treated as unsigned.
2082 
2083   IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2084   Builder.SetInsertPoint(Loc);
2085   Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc);
2086 
2087   IntegerType *Ty =
2088       IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2089 
2090   Value *StepValue = expandCodeForImpl(Step, Ty, Loc);
2091   Value *NegStepValue =
2092       expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc);
2093   Value *StartValue = expandCodeForImpl(Start, ARTy, Loc);
2094 
2095   ConstantInt *Zero =
2096       ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
2097 
2098   Builder.SetInsertPoint(Loc);
2099   // Compute |Step|
2100   Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2101   Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2102 
2103   // Compute |Step| * Backedge
2104   // Compute:
2105   //   1. Start + |Step| * Backedge < Start
2106   //   2. Start - |Step| * Backedge > Start
2107   //
2108   // And select either 1. or 2. depending on whether step is positive or
2109   // negative. If Step is known to be positive or negative, only create
2110   // either 1. or 2.
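  // Illustrative shape of the emitted check, assuming both signs are
  // possible and Step != 1:
  //   %mul = umul_with_overflow(|Step|, BTC)
  //   %add = Start + %mul.result        (or a GEP for pointer-typed ARs)
  //   %sub = Start - %mul.result
  //   %cmp = select (Step < 0), (%sub > Start), (%add < Start)
  //   %res = or %cmp, %mul.overflow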
2111   auto ComputeEndCheck = [&]() -> Value * {
2112     // Checking <u 0 is always false.
2113     if (!Signed && Start->isZero() && SE.isKnownPositive(Step))
2114       return ConstantInt::getFalse(Loc->getContext());
2115 
2116     // Get the backedge-taken count, and truncate or extend it to the AR type.
2117     Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2118 
2119     Value *MulV, *OfMul;
2120     if (Step->isOne()) {
2121       // Special-case Step of one. Potentially-costly `umul_with_overflow` isn't
2122       // needed; there is never an overflow, so to avoid artificially inflating
2123       // the cost of the check, directly emit the optimized IR.
2124       MulV = TruncTripCount;
2125       OfMul = ConstantInt::getFalse(MulV->getContext());
2126     } else {
2127       auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2128                                              Intrinsic::umul_with_overflow, Ty);
2129       CallInst *Mul =
2130           Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2131       MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2132       OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2133     }
2134 
2135     Value *Add = nullptr, *Sub = nullptr;
2136     bool NeedPosCheck = !SE.isKnownNegative(Step);
2137     bool NeedNegCheck = !SE.isKnownPositive(Step);
2138 
2139     if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
2140       StartValue = InsertNoopCastOfTo(
2141           StartValue, Builder.getInt8PtrTy(ARPtrTy->getAddressSpace()));
2142       Value *NegMulV = Builder.CreateNeg(MulV);
2143       if (NeedPosCheck)
2144         Add = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, MulV);
2145       if (NeedNegCheck)
2146         Sub = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, NegMulV);
2147     } else {
2148       if (NeedPosCheck)
2149         Add = Builder.CreateAdd(StartValue, MulV);
2150       if (NeedNegCheck)
2151         Sub = Builder.CreateSub(StartValue, MulV);
2152     }
2153 
2154     Value *EndCompareLT = nullptr;
2155     Value *EndCompareGT = nullptr;
2156     Value *EndCheck = nullptr;
2157     if (NeedPosCheck)
2158       EndCheck = EndCompareLT = Builder.CreateICmp(
2159           Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2160     if (NeedNegCheck)
2161       EndCheck = EndCompareGT = Builder.CreateICmp(
2162           Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2163     if (NeedPosCheck && NeedNegCheck) {
2164       // Select the answer based on the sign of Step.
2165       EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2166     }
2167     return Builder.CreateOr(EndCheck, OfMul);
2168   };
2169   Value *EndCheck = ComputeEndCheck();
2170 
2171   // If the backedge taken count type is larger than the AR type,
2172   // check that we don't drop any bits by truncating it. If we are
2173   // dropping bits, then we have overflow (unless the step is zero).
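  // For example (illustrative): with an i64 backedge-taken count and an i32
  // AR, any count greater than UINT32_MAX is flagged as overflow, provided
  // the runtime step is non-zero.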
2174   if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2175     auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2176     auto *BackedgeCheck =
2177         Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2178                            ConstantInt::get(Loc->getContext(), MaxVal));
2179     BackedgeCheck = Builder.CreateAnd(
2180         BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2181 
2182     EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2183   }
2184 
2185   return EndCheck;
2186 }
2187 
2188 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2189                                          Instruction *IP) {
2190   const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2191   Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2192 
2193   // Add a check for NUSW
2194   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2195     NUSWCheck = generateOverflowCheck(A, IP, false);
2196 
2197   // Add a check for NSSW
2198   if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2199     NSSWCheck = generateOverflowCheck(A, IP, true);
2200 
2201   if (NUSWCheck && NSSWCheck)
2202     return Builder.CreateOr(NUSWCheck, NSSWCheck);
2203 
2204   if (NUSWCheck)
2205     return NUSWCheck;
2206 
2207   if (NSSWCheck)
2208     return NSSWCheck;
2209 
2210   return ConstantInt::getFalse(IP->getContext());
2211 }
2212 
2213 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2214                                           Instruction *IP) {
2215   // Loop over all checks in this set.
2216   SmallVector<Value *> Checks;
2217   for (const auto *Pred : Union->getPredicates()) {
2218     Checks.push_back(expandCodeForPredicate(Pred, IP));
2219     Builder.SetInsertPoint(IP);
2220   }
2221 
2222   if (Checks.empty())
2223     return ConstantInt::getFalse(IP->getContext());
2224   return Builder.CreateOr(Checks);
2225 }
2226 
2227 Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {
2228   auto *DefI = dyn_cast<Instruction>(V);
2229   if (!PreserveLCSSA || !DefI)
2230     return V;
2231 
2232   Instruction *InsertPt = &*Builder.GetInsertPoint();
2233   Loop *DefLoop = SE.LI.getLoopFor(DefI->getParent());
2234   Loop *UseLoop = SE.LI.getLoopFor(InsertPt->getParent());
2235   if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2236     return V;
2237 
2238   // Create a temporary instruction at the current insertion point, so we
2239   // can hand it off to the helper to create LCSSA PHIs if required for the
2240   // new use.
2241   // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
2242   // would accept an insertion point and return an LCSSA phi for that
2243   // insertion point, so there is no need to insert & remove the temporary
2244   // instruction.
2245   Type *ToTy;
2246   if (DefI->getType()->isIntegerTy())
2247     ToTy = DefI->getType()->getPointerTo();
2248   else
2249     ToTy = Type::getInt32Ty(DefI->getContext());
2250   Instruction *User =
2251       CastInst::CreateBitOrPointerCast(DefI, ToTy, "tmp.lcssa.user", InsertPt);
2252   auto RemoveUserOnExit =
2253       make_scope_exit([User]() { User->eraseFromParent(); });
2254 
2255   SmallVector<Instruction *, 1> ToUpdate;
2256   ToUpdate.push_back(DefI);
2257   SmallVector<PHINode *, 16> PHIsToRemove;
2258   SmallVector<PHINode *, 16> InsertedPHIs;
2259   formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, &PHIsToRemove,
2260                            &InsertedPHIs);
2261   for (PHINode *PN : InsertedPHIs)
2262     rememberInstruction(PN);
2263   for (PHINode *PN : PHIsToRemove) {
2264     if (!PN->use_empty())
2265       continue;
2266     InsertedValues.erase(PN);
2267     InsertedPostIncValues.erase(PN);
2268     PN->eraseFromParent();
2269   }
2270 
2271   return User->getOperand(0);
2272 }
2273 
2274 namespace {
2275 // Search for a SCEV subexpression that is not safe to expand.  Any expression
2276 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2277 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2278 // instruction, but the important thing is that we prove the denominator is
2279 // nonzero before expansion.
2280 //
2281 // IVUsers already checks that IV-derived expressions are safe. So this check is
2282 // only needed when the expression includes some subexpression that is not IV
2283 // derived.
2284 //
2285 // Currently, we only allow division by a value provably non-zero here.
2286 //
2287 // We cannot generally expand recurrences unless the step dominates the loop
2288 // header. The expander handles the special case of affine recurrences by
2289 // scaling the recurrence outside the loop, but this technique isn't generally
2290 // applicable. Expanding a nested recurrence outside a loop requires computing
2291 // binomial coefficients. This could be done, but the recurrence has to be in a
2292 // perfectly reduced form, which can't be guaranteed.
2293 struct SCEVFindUnsafe {
2294   ScalarEvolution &SE;
2295   bool CanonicalMode;
2296   bool IsUnsafe = false;
2297 
2298   SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
2299       : SE(SE), CanonicalMode(CanonicalMode) {}
2300 
2301   bool follow(const SCEV *S) {
2302     if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2303       if (!SE.isKnownNonZero(D->getRHS())) {
2304         IsUnsafe = true;
2305         return false;
2306       }
2307     }
2308     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2309       const SCEV *Step = AR->getStepRecurrence(SE);
2310       if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2311         IsUnsafe = true;
2312         return false;
2313       }
2314 
2315       // For non-affine addrecs or in non-canonical mode we need a preheader
2316       // to insert into.
2317       if (!AR->getLoop()->getLoopPreheader() &&
2318           (!CanonicalMode || !AR->isAffine())) {
2319         IsUnsafe = true;
2320         return false;
2321       }
2322     }
2323     return true;
2324   }
2325   bool isDone() const { return IsUnsafe; }
2326 };
2327 } // namespace
2328 
2329 bool SCEVExpander::isSafeToExpand(const SCEV *S) const {
2330   SCEVFindUnsafe Search(SE, CanonicalMode);
2331   visitAll(S, Search);
2332   return !Search.IsUnsafe;
2333 }
2334 
2335 bool SCEVExpander::isSafeToExpandAt(const SCEV *S,
2336                                     const Instruction *InsertionPoint) const {
2337   if (!isSafeToExpand(S))
2338     return false;
2339   // We have to prove that the expansion site of S dominates InsertionPoint.
2340   // This is easy when not in the same block, but hard when S is an instruction
2341   // to be expanded somewhere inside the same block as our insertion point.
2342   // What we really need here is something analogous to an OrderedBasicBlock,
2343   // but for the moment, we paper over the problem by handling two common and
2344   // cheap-to-check cases.
2345   if (SE.properlyDominates(S, InsertionPoint->getParent()))
2346     return true;
2347   if (SE.dominates(S, InsertionPoint->getParent())) {
2348     if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2349       return true;
2350     if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2351       if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2352         return true;
2353   }
2354   return false;
2355 }
2356 
2357 void SCEVExpanderCleaner::cleanup() {
2358   // Result is used, nothing to remove.
2359   if (ResultUsed)
2360     return;
2361 
2362   auto InsertedInstructions = Expander.getAllInsertedInstructions();
2363 #ifndef NDEBUG
2364   SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
2365                                             InsertedInstructions.end());
2366   (void)InsertedSet;
2367 #endif
2368   // Remove sets with value handles.
2369   Expander.clear();
2370 
2371   // Remove all inserted instructions.
2372   for (Instruction *I : reverse(InsertedInstructions)) {
2373 #ifndef NDEBUG
2374     assert(all_of(I->users(),
2375                   [&InsertedSet](Value *U) {
2376                     return InsertedSet.contains(cast<Instruction>(U));
2377                   }) &&
2378            "removed instruction should only be used by instructions inserted "
2379            "during expansion");
2380 #endif
2381     assert(!I->getType()->isVoidTy() &&
2382            "inserted instruction should have non-void types");
2383     I->replaceAllUsesWith(PoisonValue::get(I->getType()));
2384     I->eraseFromParent();
2385   }
2386 }
2387