xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp (revision d9a42747950146bf03cda7f6e25d219253f8a57a)
1 //===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file "describes" induction and recurrence variables.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/IVDescriptors.h"
14 #include "llvm/Analysis/DemandedBits.h"
15 #include "llvm/Analysis/LoopInfo.h"
16 #include "llvm/Analysis/ScalarEvolution.h"
17 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/Module.h"
22 #include "llvm/IR/PatternMatch.h"
23 #include "llvm/IR/ValueHandle.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/KnownBits.h"
26 
27 #include <set>
28 
29 using namespace llvm;
30 using namespace llvm::PatternMatch;
31 
32 #define DEBUG_TYPE "iv-descriptors"
33 
34 bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
35                                         SmallPtrSetImpl<Instruction *> &Set) {
36   for (const Use &Use : I->operands())
37     if (!Set.count(dyn_cast<Instruction>(Use)))
38       return false;
39   return true;
40 }
41 
42 bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
43   switch (Kind) {
44   default:
45     break;
46   case RecurKind::Add:
47   case RecurKind::Mul:
48   case RecurKind::Or:
49   case RecurKind::And:
50   case RecurKind::Xor:
51   case RecurKind::SMax:
52   case RecurKind::SMin:
53   case RecurKind::UMax:
54   case RecurKind::UMin:
55   case RecurKind::SelectICmp:
56   case RecurKind::SelectFCmp:
57     return true;
58   }
59   return false;
60 }
61 
62 bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
63   return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
64 }
65 
66 /// Determines if Phi may have been type-promoted. If Phi has a single user
67 /// that ANDs the Phi with a type mask, return the user. RT is updated to
68 /// account for the narrower bit width represented by the mask, and the AND
69 /// instruction is added to CI.
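///
/// For example (an illustrative sketch, not taken from a specific test),
/// InstCombine may have promoted an i8 reduction to i32 and masked the phi
/// back down each iteration:
///   %sum = phi i32 [ 0, %ph ], [ %add, %loop ]
///   %masked = and i32 %sum, 255   ; a 2^8-1 mask implies an i8 recurrence
///   %add = add i32 %masked, %val
/// In that case RT becomes i8 and the 'and' instruction is recorded in CI.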
70 static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
71                                    SmallPtrSetImpl<Instruction *> &Visited,
72                                    SmallPtrSetImpl<Instruction *> &CI) {
73   if (!Phi->hasOneUse())
74     return Phi;
75 
76   const APInt *M = nullptr;
77   Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());
78 
79   // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
80   // with a new integer type of the corresponding bit width.
81   if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
82     int32_t Bits = (*M + 1).exactLogBase2();
83     if (Bits > 0) {
84       RT = IntegerType::get(Phi->getContext(), Bits);
85       Visited.insert(Phi);
86       CI.insert(J);
87       return J;
88     }
89   }
90   return Phi;
91 }
92 
93 /// Compute the minimal bit width needed to represent a reduction whose exit
94 /// instruction is given by Exit.
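///
/// For example (an illustrative sketch), if DemandedBits reports that only the
/// low 8 bits of an i32 add reduction are live out of Exit, the recurrence can
/// be evaluated in i8 and zero-extended afterwards (IsSigned == false). If the
/// narrowing instead comes from counting sign bits and the value is not known
/// to be non-negative, an extra sign bit is kept and sext is used instead
/// (IsSigned == true).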
95 static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
96                                                      DemandedBits *DB,
97                                                      AssumptionCache *AC,
98                                                      DominatorTree *DT) {
99   bool IsSigned = false;
100   const DataLayout &DL = Exit->getModule()->getDataLayout();
101   uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());
102 
103   if (DB) {
104     // Use the demanded bits analysis to determine the bits that are live out
105     // of the exit instruction, rounding up to the nearest power of two. If the
106     // use of demanded bits results in a smaller bit width, we know the value
107     // must be positive (i.e., IsSigned = false), because if this were not the
108     // case, the sign bit would have been demanded.
109     auto Mask = DB->getDemandedBits(Exit);
110     MaxBitWidth = Mask.getBitWidth() - Mask.countLeadingZeros();
111   }
112 
113   if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
114     // If demanded bits wasn't able to limit the bit width, we can try to use
115     // value tracking instead. This can be the case, for example, if the value
116     // may be negative.
117     auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
118     auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
119     MaxBitWidth = NumTypeBits - NumSignBits;
120     KnownBits Bits = computeKnownBits(Exit, DL);
121     if (!Bits.isNonNegative()) {
122       // If the value is not known to be non-negative, we set IsSigned to true,
123       // meaning that we will use sext instructions instead of zext
124       // instructions to restore the original type.
125       IsSigned = true;
126       // Make sure at least one sign bit is included in the result, so it
127       // will get properly sign-extended.
128       ++MaxBitWidth;
129     }
130   }
131   if (!isPowerOf2_64(MaxBitWidth))
132     MaxBitWidth = NextPowerOf2(MaxBitWidth);
133 
134   return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
135                         IsSigned);
136 }
137 
138 /// Collect cast instructions that can be ignored in the vectorizer's cost
139 /// model, given a reduction exit value and the minimal type in which the
140 /// reduction can be represented. Also search casts to the recurrence type
141 /// to find the minimum width used by the recurrence.
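///
/// For example (an illustrative sketch), with a recurrence type of i32, a
/// 'sext i16 %v to i32' feeding the reduction lowers MinWidthCastToRecurTy to
/// 16, while a 'trunc i32 %x to i16' will be eliminated and is added to Casts
/// so the vectorizer cost model can ignore it.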
142 static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
143                               Type *RecurrenceType,
144                               SmallPtrSetImpl<Instruction *> &Casts,
145                               unsigned &MinWidthCastToRecurTy) {
146 
147   SmallVector<Instruction *, 8> Worklist;
148   SmallPtrSet<Instruction *, 8> Visited;
149   Worklist.push_back(Exit);
150   MinWidthCastToRecurTy = -1U;
151 
152   while (!Worklist.empty()) {
153     Instruction *Val = Worklist.pop_back_val();
154     Visited.insert(Val);
155     if (auto *Cast = dyn_cast<CastInst>(Val)) {
156       if (Cast->getSrcTy() == RecurrenceType) {
157         // If the source type of a cast instruction is equal to the recurrence
158         // type, it will be eliminated, and should be ignored in the vectorizer
159         // cost model.
160         Casts.insert(Cast);
161         continue;
162       }
163       if (Cast->getDestTy() == RecurrenceType) {
164         // The minimum width used by the recurrence is found by checking for
165         // casts on its operands. The minimum width is used by the vectorizer
166         // when finding the widest type for in-loop reductions without any
167         // loads/stores.
168         MinWidthCastToRecurTy = std::min<unsigned>(
169             MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
170         continue;
171       }
172     }
173     // Add all operands to the work list if they are loop-varying values that
174     // we haven't yet visited.
175     for (Value *O : cast<User>(Val)->operands())
176       if (auto *I = dyn_cast<Instruction>(O))
177         if (TheLoop->contains(I) && !Visited.count(I))
178           Worklist.push_back(I);
179   }
180 }
181 
182 // Check if a given Phi node can be recognized as an ordered reduction for
183 // vectorizing floating point operations without unsafe math.
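//
// For example (illustrative only), a strict FP add reduction whose order must
// be preserved because reassociation is not allowed:
//   %sum = phi float [ %init, %ph ], [ %sum.next, %loop ]
//   %sum.next = fadd float %sum, %val   ; no 'reassoc'/'fast' flags
// Such a reduction can still be vectorized as an in-order (ordered) reduction.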
184 static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
185                                   Instruction *Exit, PHINode *Phi) {
186   // Currently only FAdd and FMulAdd are supported.
187   if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
188     return false;
189 
190   if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
191     return false;
192 
193   if (Kind == RecurKind::FMulAdd &&
194       !RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
195     return false;
196 
197   // Ensure the exit instruction has only one user other than the reduction PHI
198   if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
199     return false;
200 
201   // The only pattern accepted is the one in which the reduction PHI
202   // is used as one of the operands of the exit instruction
203   auto *Op0 = Exit->getOperand(0);
204   auto *Op1 = Exit->getOperand(1);
205   if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
206     return false;
207   if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
208     return false;
209 
210   LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
211                     << ", ExitInst: " << *Exit << "\n");
212 
213   return true;
214 }
215 
216 bool RecurrenceDescriptor::AddReductionVar(
217     PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
218     RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
219     DominatorTree *DT, ScalarEvolution *SE) {
220   if (Phi->getNumIncomingValues() != 2)
221     return false;
222 
223   // Reduction variables are only found in the loop header block.
224   if (Phi->getParent() != TheLoop->getHeader())
225     return false;
226 
227   // Obtain the reduction start value from the value that comes from the loop
228   // preheader.
229   Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());
230 
231   // ExitInstruction is the single value which is used outside the loop.
232   // We only allow for a single reduction value to be used outside the loop.
233   // This includes users of the reduction variables, which form a cycle
234   // that ends in the phi node.
235   Instruction *ExitInstruction = nullptr;
236 
237   // Keeps track of the last visited store instruction. By the end of the
238   // algorithm this variable is either null or holds the intermediate
239   // reduction value stored to a loop-invariant address.
240   StoreInst *IntermediateStore = nullptr;
241 
242   // Indicates that we found a reduction operation in our scan.
243   bool FoundReduxOp = false;
244 
245   // We start with the PHI node and scan for all of the users of this
246   // instruction. All users must be instructions that can be used as reduction
247   // variables (such as ADD). We must have a single out-of-block user. The cycle
248   // must include the original PHI.
249   bool FoundStartPHI = false;
250 
251   // To recognize min/max patterns formed by an icmp/select sequence, we store
252   // the number of instructions we saw from the recognized min/max pattern,
253   // to make sure we only see exactly the two instructions.
254   unsigned NumCmpSelectPatternInst = 0;
255   InstDesc ReduxDesc(false, nullptr);
256 
257   // Data used for determining if the recurrence has been type-promoted.
258   Type *RecurrenceType = Phi->getType();
259   SmallPtrSet<Instruction *, 4> CastInsts;
260   unsigned MinWidthCastToRecurrenceType;
261   Instruction *Start = Phi;
262   bool IsSigned = false;
263 
264   SmallPtrSet<Instruction *, 8> VisitedInsts;
265   SmallVector<Instruction *, 8> Worklist;
266 
267   // Return early if the recurrence kind does not match the type of Phi. If the
268   // recurrence kind is arithmetic, we attempt to look through AND operations
269   // resulting from the type promotion performed by InstCombine.  Vector
270   // operations are not limited to the legal integer widths, so we may be able
271   // to evaluate the reduction in the narrower width.
272   if (RecurrenceType->isFloatingPointTy()) {
273     if (!isFloatingPointRecurrenceKind(Kind))
274       return false;
275   } else if (RecurrenceType->isIntegerTy()) {
276     if (!isIntegerRecurrenceKind(Kind))
277       return false;
278     if (!isMinMaxRecurrenceKind(Kind))
279       Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
280   } else {
281     // Pointer min/max may exist, but it is not supported as a reduction op.
282     return false;
283   }
284 
285   Worklist.push_back(Start);
286   VisitedInsts.insert(Start);
287 
288   // Start with all flags set because we will intersect this with the reduction
289   // flags from all the reduction operations.
290   FastMathFlags FMF = FastMathFlags::getFast();
291 
292   // The first instruction in the use-def chain of the Phi node that requires
293   // exact floating point operations.
294   Instruction *ExactFPMathInst = nullptr;
295 
296   // A value in the reduction can be used:
297   //  - By the reduction:
298   //      - Reduction operation:
299   //        - One use of reduction value (safe).
300   //        - Multiple use of reduction value (not safe).
301   //      - PHI:
302   //        - All uses of the PHI must be the reduction (safe).
303   //        - Otherwise, not safe.
304   //  - By instructions outside of the loop (safe).
305   //      * One value may have several outside users, but all outside
306   //        uses must be of the same value.
307   //  - By store instructions with a loop invariant address (safe with
308   //    the following restrictions):
309   //      * If there are several stores, all must have the same address.
310   //      * Final value should be stored in that loop invariant address.
311   //  - By an instruction that is not part of the reduction (not safe).
312   //    This is either:
313   //      * An instruction type other than PHI or the reduction operation.
314   //      * A PHI in the header other than the initial PHI.
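  //
  // For example (illustrative only), a safe integer add reduction chain
  // matching the rules above:
  //   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %loop ]
  //   %sum.next = add i32 %sum, %val   ; single in-loop use: the phi backedge
  // with %sum.next additionally used once outside the loop (the exit value).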
315   while (!Worklist.empty()) {
316     Instruction *Cur = Worklist.pop_back_val();
317 
318     // Store instructions are allowed iff they store the reduction value to
319     // the same loop-invariant memory location.
320     if (auto *SI = dyn_cast<StoreInst>(Cur)) {
321       if (!SE) {
322         LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
323                           << "Scalar Evolution Analysis\n");
324         return false;
325       }
326 
327       const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
328       // Check it is the same address as previous stores
329       if (IntermediateStore) {
330         const SCEV *OtherScev =
331             SE->getSCEV(IntermediateStore->getPointerOperand());
332 
333         if (OtherScev != PtrScev) {
334           LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
335                             << "inside the loop: " << *SI->getPointerOperand()
336                             << " and "
337                             << *IntermediateStore->getPointerOperand() << '\n');
338           return false;
339         }
340       }
341 
342       // Check the pointer is loop invariant
343       if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
344         LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
345                           << "inside the loop: " << *SI->getPointerOperand()
346                           << '\n');
347         return false;
348       }
349 
350       // IntermediateStore is always the last store in the loop.
351       IntermediateStore = SI;
352       continue;
353     }
354 
355     // No Users.
356     // If the instruction has no users then this is a broken chain and can't be
357     // a reduction variable.
358     if (Cur->use_empty())
359       return false;
360 
361     bool IsAPhi = isa<PHINode>(Cur);
362 
363     // A header PHI use other than the original PHI.
364     if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
365       return false;
366 
367     // Reductions of instructions such as Div and Sub are only possible if the
368     // LHS is the reduction variable.
369     if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
370         !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
371         !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
372       return false;
373 
374     // Any reduction instruction must be of one of the allowed kinds. We ignore
375     // the starting value (the Phi or an AND instruction if the Phi has been
376     // type-promoted).
377     if (Cur != Start) {
378       ReduxDesc =
379           isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
380       ExactFPMathInst = ExactFPMathInst == nullptr
381                             ? ReduxDesc.getExactFPMathInst()
382                             : ExactFPMathInst;
383       if (!ReduxDesc.isRecurrence())
384         return false;
385       // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
386       if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
387         FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
388         if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
389           // Accept FMF on either fcmp or select of a min/max idiom.
390           // TODO: This is a hack to work-around the fact that FMF may not be
391           //       assigned/propagated correctly. If that problem is fixed or we
392           //       standardize on fmin/fmax via intrinsics, this can be removed.
393           if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
394             CurFMF |= FCmp->getFastMathFlags();
395         }
396         FMF &= CurFMF;
397       }
398       // Update this reduction kind if we matched a new instruction.
399       // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
400       //       state accurate while processing the worklist?
401       if (ReduxDesc.getRecKind() != RecurKind::None)
402         Kind = ReduxDesc.getRecKind();
403     }
404 
405     bool IsASelect = isa<SelectInst>(Cur);
406 
407     // A conditional reduction operation must only have 2 or fewer uses in
408     // VisitedInsts.
409     if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
410         hasMultipleUsesOf(Cur, VisitedInsts, 2))
411       return false;
412 
413     // A reduction operation must only have one use of the reduction value.
414     if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
415         !isSelectCmpRecurrenceKind(Kind) &&
416         hasMultipleUsesOf(Cur, VisitedInsts, 1))
417       return false;
418 
419     // All inputs to a PHI node must be a reduction value.
420     if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
421       return false;
422 
423     if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectICmp) &&
424         (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
425       ++NumCmpSelectPatternInst;
426     if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectFCmp) &&
427         (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
428       ++NumCmpSelectPatternInst;
429 
430     // Check whether we found a reduction operator.
431     FoundReduxOp |= !IsAPhi && Cur != Start;
432 
433     // Process users of current instruction. Push non-PHI nodes after PHI nodes
434     // onto the stack. This way we are going to have seen all inputs to PHI
435     // nodes once we get to them.
436     SmallVector<Instruction *, 8> NonPHIs;
437     SmallVector<Instruction *, 8> PHIs;
438     for (User *U : Cur->users()) {
439       Instruction *UI = cast<Instruction>(U);
440 
441       // If the user is a call to llvm.fmuladd then the instruction can only be
442       // the final operand.
443       if (isFMulAddIntrinsic(UI))
444         if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
445           return false;
446 
447       // Check if we found the exit user.
448       BasicBlock *Parent = UI->getParent();
449       if (!TheLoop->contains(Parent)) {
450         // If we already know this instruction is used externally, move on to
451         // the next user.
452         if (ExitInstruction == Cur)
453           continue;
454 
455         // Exit if you find multiple values used outside or if the header phi
456         // node is being used. In this case the user uses the value of the
457         // previous iteration, in which case we would lose "VF-1" iterations of
458         // the reduction operation if we vectorize.
459         if (ExitInstruction != nullptr || Cur == Phi)
460           return false;
461 
462         // The instruction used by an outside user must be the last instruction
463         // before we feed back to the reduction phi. Otherwise, we lose VF-1
464         // operations on the value.
465         if (!is_contained(Phi->operands(), Cur))
466           return false;
467 
468         ExitInstruction = Cur;
469         continue;
470       }
471 
472       // Process instructions only once (termination). Each reduction cycle
473       // value must only be used once, except by phi nodes and min/max
474       // reductions which are represented as a cmp followed by a select.
475       InstDesc IgnoredVal(false, nullptr);
476       if (VisitedInsts.insert(UI).second) {
477         if (isa<PHINode>(UI)) {
478           PHIs.push_back(UI);
479         } else {
480           StoreInst *SI = dyn_cast<StoreInst>(UI);
481           if (SI && SI->getPointerOperand() == Cur) {
482             // The reduction variable chain can only be stored somewhere; it
483             // can't be used as an address.
484             return false;
485           }
486           NonPHIs.push_back(UI);
487         }
488       } else if (!isa<PHINode>(UI) &&
489                  ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
490                    !isa<SelectInst>(UI)) ||
491                   (!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
492                    !isSelectCmpPattern(TheLoop, Phi, UI, IgnoredVal)
493                         .isRecurrence() &&
494                    !isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
495         return false;
496 
497       // Remember that we completed the cycle.
498       if (UI == Phi)
499         FoundStartPHI = true;
500     }
501     Worklist.append(PHIs.begin(), PHIs.end());
502     Worklist.append(NonPHIs.begin(), NonPHIs.end());
503   }
504 
505   // If this check fails, we have seen one but not the other instruction of
506   // the pattern, or more than just a select and cmp. Zero implies that we saw
507   // an llvm.min/max intrinsic, which is always OK.
508   if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
509       NumCmpSelectPatternInst != 0)
510     return false;
511 
512   if (isSelectCmpRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
513     return false;
514 
515   if (IntermediateStore) {
516     // Check that the stored value is fed back into the phi node. This way we
517     // make sure that the value stored in IntermediateStore is indeed the final
518     // reduction value.
519     if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
520       LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
521                         << *IntermediateStore << '\n');
522       return false;
523     }
524 
525     // If there is an exit instruction, its value should be stored in
526     // IntermediateStore.
527     if (ExitInstruction &&
528         IntermediateStore->getValueOperand() != ExitInstruction) {
529       LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not "
530                            "store last calculated value of the reduction: "
531                         << *IntermediateStore << '\n');
532       return false;
533     }
534 
535     // If all uses are inside the loop (intermediate stores), then the
536     // reduction value after the loop will be the one used in the last store.
537     if (!ExitInstruction)
538       ExitInstruction = cast<Instruction>(IntermediateStore->getValueOperand());
539   }
540 
541   if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
542     return false;
543 
544   const bool IsOrdered =
545       checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);
546 
547   if (Start != Phi) {
548     // If the starting value is not the same as the phi node, we speculatively
549     // looked through an 'and' instruction when evaluating a potential
550     // arithmetic reduction to determine if it may have been type-promoted.
551     //
552     // We now compute the minimal bit width that is required to represent the
553     // reduction. If this is the same width that was indicated by the 'and', we
554     // can represent the reduction in the smaller type. The 'and' instruction
555     // will be eliminated since it will essentially be a cast instruction that
556     // can be ignored in the cost model. If we compute a different type than we
557     // did when evaluating the 'and', the 'and' will not be eliminated, and we
558     // will end up with different kinds of operations in the recurrence
559     // expression (e.g., IntegerAND, IntegerADD). We give up if this is
560     // the case.
561     //
562     // The vectorizer relies on InstCombine to perform the actual
563     // type-shrinking. It does this by inserting instructions to truncate the
564     // exit value of the reduction to the width indicated by RecurrenceType and
565     // then extend this value back to the original width. If IsSigned is false,
566     // a 'zext' instruction will be generated; otherwise, a 'sext' will be
567     // used.
568     //
569     // TODO: We should not rely on InstCombine to rewrite the reduction in the
570     //       smaller type. We should just generate a correctly typed expression
571     //       to begin with.
572     Type *ComputedType;
573     std::tie(ComputedType, IsSigned) =
574         computeRecurrenceType(ExitInstruction, DB, AC, DT);
575     if (ComputedType != RecurrenceType)
576       return false;
577   }
578 
579   // Collect cast instructions and the minimum width used by the recurrence.
580   // If the starting value is not the same as the phi node and the computed
581   // recurrence type is equal to the recurrence type, the recurrence expression
582   // will be represented in a narrower or wider type. If there are any cast
583   // instructions that will be unnecessary, collect them in CastsFromRecurTy.
584   // Note that the 'and' instruction was already included in this list.
585   //
586   // TODO: A better way to represent this may be to tag in some way all the
587   //       instructions that are a part of the reduction. The vectorizer cost
588   //       model could then apply the recurrence type to these instructions,
589   //       without needing a white list of instructions to ignore.
590   //       This may also be useful for the inloop reductions, if it can be
591   //       kept simple enough.
592   collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
593                     MinWidthCastToRecurrenceType);
594 
595   // We found a reduction var if we have reached the original phi node and we
596   // only have a single instruction with out-of-loop users.
597 
598   // The ExitInstruction (the instruction which is allowed to have out-of-loop
599   // users) is saved as part of the RecurrenceDescriptor.
600 
601   // Save the description of this reduction variable.
602   RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
603                           FMF, ExactFPMathInst, RecurrenceType, IsSigned,
604                           IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
605   RedDes = RD;
606 
607   return true;
608 }
609 
610 // We are looking for loops that do something like this:
611 //   int r = 0;
612 //   for (int i = 0; i < n; i++) {
613 //     if (src[i] > 3)
614 //       r = 3;
615 //   }
616 // where the reduction value (r) only has two states, in this example 0 or 3.
617 // The generated LLVM IR for this type of loop will be like this:
618 //   for.body:
619 //     %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
620 //     ...
621 //     %cmp = icmp sgt i32 %5, 3
622 //     %spec.select = select i1 %cmp, i32 3, i32 %r
623 //     ...
624 // In general we can support vectorization of loops where 'r' flips between
625 // any two non-constants, provided they are loop invariant. The only thing
626 // we actually care about at the end of the loop is whether or not any lane
627 // in the selected vector is different from the start value. The final
628 // across-vector reduction after the loop simply involves choosing the start
629 // value if nothing changed (0 in the example above) or the other selected
630 // value (3 in the example above).
631 RecurrenceDescriptor::InstDesc
632 RecurrenceDescriptor::isSelectCmpPattern(Loop *Loop, PHINode *OrigPhi,
633                                          Instruction *I, InstDesc &Prev) {
634   // We must handle the select(cmp(),x,y) as a single instruction. Advance to
635   // the select.
636   CmpInst::Predicate Pred;
637   if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
638     if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
639       return InstDesc(Select, Prev.getRecKind());
640   }
641 
642   // Only match select with single use cmp condition.
643   if (!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
644                          m_Value())))
645     return InstDesc(false, I);
646 
647   SelectInst *SI = cast<SelectInst>(I);
648   Value *NonPhi = nullptr;
649 
650   if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
651     NonPhi = SI->getFalseValue();
652   else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
653     NonPhi = SI->getTrueValue();
654   else
655     return InstDesc(false, I);
656 
657   // We are looking for selects of the form:
658   //   select(cmp(), phi, loop_invariant) or
659   //   select(cmp(), loop_invariant, phi)
660   if (!Loop->isLoopInvariant(NonPhi))
661     return InstDesc(false, I);
662 
663   return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::SelectICmp
664                                                      : RecurKind::SelectFCmp);
665 }
666 
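// A min/max recurrence can appear either as an icmp/select pair or as a call
// to a min/max intrinsic. For example (illustrative only):
//   %c = icmp sgt i32 %x, %max
//   %max.next = select i1 %c, i32 %x, i32 %max
// or, equivalently:
//   %max.next = call i32 @llvm.smax.i32(i32 %x, i32 %max)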
667 RecurrenceDescriptor::InstDesc
668 RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
669                                       const InstDesc &Prev) {
670   assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
671          "Expected a cmp or select or call instruction");
672   if (!isMinMaxRecurrenceKind(Kind))
673     return InstDesc(false, I);
674 
675   // We must handle the select(cmp()) as a single instruction. Advance to the
676   // select.
677   CmpInst::Predicate Pred;
678   if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
679     if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
680       return InstDesc(Select, Prev.getRecKind());
681   }
682 
683   // Only match select with single use cmp condition, or a min/max intrinsic.
684   if (!isa<IntrinsicInst>(I) &&
685       !match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
686                          m_Value())))
687     return InstDesc(false, I);
688 
689   // Look for a min/max pattern.
690   if (match(I, m_UMin(m_Value(), m_Value())))
691     return InstDesc(Kind == RecurKind::UMin, I);
692   if (match(I, m_UMax(m_Value(), m_Value())))
693     return InstDesc(Kind == RecurKind::UMax, I);
694   if (match(I, m_SMax(m_Value(), m_Value())))
695     return InstDesc(Kind == RecurKind::SMax, I);
696   if (match(I, m_SMin(m_Value(), m_Value())))
697     return InstDesc(Kind == RecurKind::SMin, I);
698   if (match(I, m_OrdFMin(m_Value(), m_Value())))
699     return InstDesc(Kind == RecurKind::FMin, I);
700   if (match(I, m_OrdFMax(m_Value(), m_Value())))
701     return InstDesc(Kind == RecurKind::FMax, I);
702   if (match(I, m_UnordFMin(m_Value(), m_Value())))
703     return InstDesc(Kind == RecurKind::FMin, I);
704   if (match(I, m_UnordFMax(m_Value(), m_Value())))
705     return InstDesc(Kind == RecurKind::FMax, I);
706   if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
707     return InstDesc(Kind == RecurKind::FMin, I);
708   if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
709     return InstDesc(Kind == RecurKind::FMax, I);
710 
711   return InstDesc(false, I);
712 }
713 
714 /// Returns true if the select instruction has users in the compare-and-add
715 /// reduction pattern below. The select instruction argument is the last one
716 /// in the sequence.
717 ///
718 /// %sum.1 = phi ...
719 /// ...
720 /// %cmp = fcmp pred %0, %CFP
721 /// %add = fadd %0, %sum.1
722 /// %sum.2 = select %cmp, %add, %sum.1
723 RecurrenceDescriptor::InstDesc
724 RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
725   SelectInst *SI = dyn_cast<SelectInst>(I);
726   if (!SI)
727     return InstDesc(false, I);
728 
729   CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
730   // Only handle single use cases for now.
731   if (!CI || !CI->hasOneUse())
732     return InstDesc(false, I);
733 
734   Value *TrueVal = SI->getTrueValue();
735   Value *FalseVal = SI->getFalseValue();
736   // For now, handle only the case where exactly one of the select's operands
737   // is a PHI node.
738   if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
739       (!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
740     return InstDesc(false, I);
741 
742   Instruction *I1 =
743       isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
744                              : dyn_cast<Instruction>(TrueVal);
745   if (!I1 || !I1->isBinaryOp())
746     return InstDesc(false, I);
747 
748   Value *Op1, *Op2;
749   if ((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1)  ||
750        m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
751       I1->isFast())
752     return InstDesc(Kind == RecurKind::FAdd, SI);
753 
754   if (m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast()))
755     return InstDesc(Kind == RecurKind::FMul, SI);
756 
757   return InstDesc(false, I);
758 }
759 
760 RecurrenceDescriptor::InstDesc
761 RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
762                                         Instruction *I, RecurKind Kind,
763                                         InstDesc &Prev, FastMathFlags FuncFMF) {
764   assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
765   switch (I->getOpcode()) {
766   default:
767     return InstDesc(false, I);
768   case Instruction::PHI:
769     return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
770   case Instruction::Sub:
771   case Instruction::Add:
772     return InstDesc(Kind == RecurKind::Add, I);
773   case Instruction::Mul:
774     return InstDesc(Kind == RecurKind::Mul, I);
775   case Instruction::And:
776     return InstDesc(Kind == RecurKind::And, I);
777   case Instruction::Or:
778     return InstDesc(Kind == RecurKind::Or, I);
779   case Instruction::Xor:
780     return InstDesc(Kind == RecurKind::Xor, I);
781   case Instruction::FDiv:
782   case Instruction::FMul:
783     return InstDesc(Kind == RecurKind::FMul, I,
784                     I->hasAllowReassoc() ? nullptr : I);
785   case Instruction::FSub:
786   case Instruction::FAdd:
787     return InstDesc(Kind == RecurKind::FAdd, I,
788                     I->hasAllowReassoc() ? nullptr : I);
789   case Instruction::Select:
790     if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul)
791       return isConditionalRdxPattern(Kind, I);
792     LLVM_FALLTHROUGH;
793   case Instruction::FCmp:
794   case Instruction::ICmp:
795   case Instruction::Call:
796     if (isSelectCmpRecurrenceKind(Kind))
797       return isSelectCmpPattern(L, OrigPhi, I, Prev);
798     if (isIntMinMaxRecurrenceKind(Kind) ||
799         (((FuncFMF.noNaNs() && FuncFMF.noSignedZeros()) ||
800           (isa<FPMathOperator>(I) && I->hasNoNaNs() &&
801            I->hasNoSignedZeros())) &&
802          isFPMinMaxRecurrenceKind(Kind)))
803       return isMinMaxPattern(I, Kind, Prev);
804     else if (isFMulAddIntrinsic(I))
805       return InstDesc(Kind == RecurKind::FMulAdd, I,
806                       I->hasAllowReassoc() ? nullptr : I);
807     return InstDesc(false, I);
808   }
809 }
810 
811 bool RecurrenceDescriptor::hasMultipleUsesOf(
812     Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
813     unsigned MaxNumUses) {
814   unsigned NumUses = 0;
815   for (const Use &U : I->operands()) {
816     if (Insts.count(dyn_cast<Instruction>(U)))
817       ++NumUses;
818     if (NumUses > MaxNumUses)
819       return true;
820   }
821 
822   return false;
823 }
824 
825 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
826                                           RecurrenceDescriptor &RedDes,
827                                           DemandedBits *DB, AssumptionCache *AC,
828                                           DominatorTree *DT,
829                                           ScalarEvolution *SE) {
830   BasicBlock *Header = TheLoop->getHeader();
831   Function &F = *Header->getParent();
832   FastMathFlags FMF;
833   FMF.setNoNaNs(
834       F.getFnAttribute("no-nans-fp-math").getValueAsBool());
835   FMF.setNoSignedZeros(
836       F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());
837 
838   if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
839                       SE)) {
840     LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
841     return true;
842   }
843   if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
844                       SE)) {
845     LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
846     return true;
847   }
848   if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
849                       SE)) {
850     LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
851     return true;
852   }
853   if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
854                       SE)) {
855     LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
856     return true;
857   }
858   if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
859                       SE)) {
860     LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
861     return true;
862   }
863   if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
864                       SE)) {
865     LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
866     return true;
867   }
868   if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
869                       SE)) {
870     LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
871     return true;
872   }
873   if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
874                       SE)) {
875     LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
876     return true;
877   }
878   if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
879                       SE)) {
880     LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
881     return true;
882   }
883   if (AddReductionVar(Phi, RecurKind::SelectICmp, TheLoop, FMF, RedDes, DB, AC,
884                       DT, SE)) {
885     LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
886                       << *Phi << "\n");
887     return true;
888   }
889   if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
890                       SE)) {
891     LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
892     return true;
893   }
894   if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
895                       SE)) {
896     LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
897     return true;
898   }
899   if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
900                       SE)) {
901     LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
902     return true;
903   }
904   if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
905                       SE)) {
906     LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
907     return true;
908   }
909   if (AddReductionVar(Phi, RecurKind::SelectFCmp, TheLoop, FMF, RedDes, DB, AC,
910                       DT, SE)) {
911     LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
912                       << *Phi << "\n");
913     return true;
914   }
915   if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
916                       SE)) {
917     LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
918     return true;
919   }
920   // Not a reduction of known type.
921   return false;
922 }
923 
924 bool RecurrenceDescriptor::isFirstOrderRecurrence(
925     PHINode *Phi, Loop *TheLoop,
926     MapVector<Instruction *, Instruction *> &SinkAfter, DominatorTree *DT) {
927 
928   // Ensure the phi node is in the loop header and has two incoming values.
929   if (Phi->getParent() != TheLoop->getHeader() ||
930       Phi->getNumIncomingValues() != 2)
931     return false;
932 
933   // Ensure the loop has a preheader and a single latch block. The loop
934   // vectorizer will need the latch to set up the next iteration of the loop.
935   auto *Preheader = TheLoop->getLoopPreheader();
936   auto *Latch = TheLoop->getLoopLatch();
937   if (!Preheader || !Latch)
938     return false;
939 
940   // Ensure the phi node's incoming blocks are the loop preheader and latch.
941   if (Phi->getBasicBlockIndex(Preheader) < 0 ||
942       Phi->getBasicBlockIndex(Latch) < 0)
943     return false;
944 
945   // Get the previous value. The previous value comes from the latch edge while
946   // the initial value comes from the preheader edge.
947   auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));
948   if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous) ||
949       SinkAfter.count(Previous)) // Cannot rely on dominance due to motion.
950     return false;
951 
952   // Ensure every user of the phi node (recursively) is dominated by the
953   // previous value. The dominance requirement ensures the loop vectorizer will
954   // not need to vectorize the initial value prior to the first iteration of the
955   // loop.
956   // TODO: Consider extending this sinking to handle memory instructions.
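  //
  // For example (an illustrative sketch), in a loop like
  //   for (int i = 1; i < n; ++i) { b[i] = f(a[i], a[i - 1]); }
  // the value of a[i - 1] from the previous iteration is modelled by a header
  // phi, and every user of that phi must be sinkable after the instruction
  // that produces a[i] in the current iteration ('Previous').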
957 
958   // We optimistically assume we can sink all users after Previous. Keep a set
959   // of instructions to sink after Previous ordered by dominance in the common
960   // basic block. It will be applied to SinkAfter if all users can be sunk.
961   auto CompareByComesBefore = [](const Instruction *A, const Instruction *B) {
962     return A->comesBefore(B);
963   };
964   std::set<Instruction *, decltype(CompareByComesBefore)> InstrsToSink(
965       CompareByComesBefore);
966 
967   BasicBlock *PhiBB = Phi->getParent();
968   SmallVector<Instruction *, 8> WorkList;
969   auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
970     // Already sunk SinkCandidate.
971     if (SinkCandidate->getParent() == PhiBB &&
972         InstrsToSink.find(SinkCandidate) != InstrsToSink.end())
973       return true;
974 
975     // Cyclic dependence.
976     if (Previous == SinkCandidate)
977       return false;
978 
979     if (DT->dominates(Previous,
980                       SinkCandidate)) // We already are good w/o sinking.
981       return true;
982 
983     if (SinkCandidate->getParent() != PhiBB ||
984         SinkCandidate->mayHaveSideEffects() ||
985         SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
986       return false;
987 
988     // Avoid sinking an instruction multiple times (if multiple operands are
989     // first order recurrences) by sinking once - after the latest 'previous'
990     // instruction.
991     auto It = SinkAfter.find(SinkCandidate);
992     if (It != SinkAfter.end()) {
993       auto *OtherPrev = It->second;
994       // Find the earliest entry in the 'sink-after' chain. The last entry in
995       // the chain is the original 'Previous' for a recurrence handled earlier.
996       auto EarlierIt = SinkAfter.find(OtherPrev);
997       while (EarlierIt != SinkAfter.end()) {
998         Instruction *EarlierInst = EarlierIt->second;
999         EarlierIt = SinkAfter.find(EarlierInst);
1000         // Bail out if order has not been preserved.
1001         if (EarlierIt != SinkAfter.end() &&
1002             !DT->dominates(EarlierInst, OtherPrev))
1003           return false;
1004         OtherPrev = EarlierInst;
1005       }
1006       // Bail out if order has not been preserved.
1007       if (OtherPrev != It->second && !DT->dominates(It->second, OtherPrev))
1008         return false;
1009 
1010       // SinkCandidate is already being sunk after an instruction after
1011       // Previous. Nothing left to do.
1012       if (DT->dominates(Previous, OtherPrev) || Previous == OtherPrev)
1013         return true;
1014       // Otherwise, Previous comes after OtherPrev and SinkCandidate needs to be
1015       // re-sunk to Previous, instead of sinking to OtherPrev. Remove
1016       // SinkCandidate from SinkAfter to ensure its insert position is updated.
1017       SinkAfter.erase(SinkCandidate);
1018     }
1019 
1020     // If we reach a PHI node that is not dominated by Previous, we reached a
1021     // header PHI. No need for sinking.
1022     if (isa<PHINode>(SinkCandidate))
1023       return true;
1024 
1025     // Sink User tentatively and check its users
1026     InstrsToSink.insert(SinkCandidate);
1027     WorkList.push_back(SinkCandidate);
1028     return true;
1029   };
1030 
1031   WorkList.push_back(Phi);
1032   // Try to recursively sink instructions and their users after Previous.
1033   while (!WorkList.empty()) {
1034     Instruction *Current = WorkList.pop_back_val();
1035     for (User *User : Current->users()) {
1036       if (!TryToPushSinkCandidate(cast<Instruction>(User)))
1037         return false;
1038     }
1039   }
1040 
1041   // We can sink all users of Phi. Update the mapping.
1042   for (Instruction *I : InstrsToSink) {
1043     SinkAfter[I] = Previous;
1044     Previous = I;
1045   }
1046   return true;
1047 }
1048 
1049 /// This function returns the identity element (or neutral element) for
1050 /// the operation K.
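///
/// For example (illustrative only), a vectorized integer add reduction seeds
/// the extra vector accumulator lanes with 0, the identity for add, so those
/// lanes do not perturb the final result; the scalar start value is folded in
/// separately.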
1051 Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
1052                                                    FastMathFlags FMF) const {
1053   switch (K) {
1054   case RecurKind::Xor:
1055   case RecurKind::Add:
1056   case RecurKind::Or:
1057     // Adding, Xoring, Oring zero to a number does not change it.
1058     return ConstantInt::get(Tp, 0);
1059   case RecurKind::Mul:
1060     // Multiplying a number by 1 does not change it.
1061     return ConstantInt::get(Tp, 1);
1062   case RecurKind::And:
1063     // AND-ing a number with an all-1 value does not change it.
1064     return ConstantInt::get(Tp, -1, true);
1065   case RecurKind::FMul:
1066     // Multiplying a number by 1 does not change it.
1067     return ConstantFP::get(Tp, 1.0L);
1068   case RecurKind::FMulAdd:
1069   case RecurKind::FAdd:
1070     // Adding zero to a number does not change it.
1071     // FIXME: Ideally we should not need to check FMF for FAdd and should always
1072     // use -0.0. However, this will currently result in mixed vectors of 0.0/-0.0.
1073     // Instead, we should ensure that 1) the FMF from FAdd are propagated to the PHI
1074     // nodes where possible, and 2) PHIs with the nsz flag + -0.0 use 0.0. This would
1075     // mean we can then remove the check for noSignedZeros() below (see D98963).
1076     if (FMF.noSignedZeros())
1077       return ConstantFP::get(Tp, 0.0L);
1078     return ConstantFP::get(Tp, -0.0L);
1079   case RecurKind::UMin:
1080     return ConstantInt::get(Tp, -1);
1081   case RecurKind::UMax:
1082     return ConstantInt::get(Tp, 0);
1083   case RecurKind::SMin:
1084     return ConstantInt::get(Tp,
1085                             APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
1086   case RecurKind::SMax:
1087     return ConstantInt::get(Tp,
1088                             APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
1089   case RecurKind::FMin:
1090     return ConstantFP::getInfinity(Tp, true);
1091   case RecurKind::FMax:
1092     return ConstantFP::getInfinity(Tp, false);
1093   case RecurKind::SelectICmp:
1094   case RecurKind::SelectFCmp:
1095     return getRecurrenceStartValue();
1096     break;
1097   default:
1098     llvm_unreachable("Unknown recurrence kind");
1099   }
1100 }
1101 
1102 unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
1103   switch (Kind) {
1104   case RecurKind::Add:
1105     return Instruction::Add;
1106   case RecurKind::Mul:
1107     return Instruction::Mul;
1108   case RecurKind::Or:
1109     return Instruction::Or;
1110   case RecurKind::And:
1111     return Instruction::And;
1112   case RecurKind::Xor:
1113     return Instruction::Xor;
1114   case RecurKind::FMul:
1115     return Instruction::FMul;
1116   case RecurKind::FMulAdd:
1117   case RecurKind::FAdd:
1118     return Instruction::FAdd;
1119   case RecurKind::SMax:
1120   case RecurKind::SMin:
1121   case RecurKind::UMax:
1122   case RecurKind::UMin:
1123   case RecurKind::SelectICmp:
1124     return Instruction::ICmp;
1125   case RecurKind::FMax:
1126   case RecurKind::FMin:
1127   case RecurKind::SelectFCmp:
1128     return Instruction::FCmp;
1129   default:
1130     llvm_unreachable("Unknown recurrence operation");
1131   }
1132 }
1133 
1134 SmallVector<Instruction *, 4>
1135 RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
1136   SmallVector<Instruction *, 4> ReductionOperations;
1137   unsigned RedOp = getOpcode(Kind);
1138 
1139   // Search down from the Phi to the LoopExitInstr, looking for instructions
1140   // with a single user of the correct type for the reduction.
1141 
1142   // Note that we check that the type of the operand is correct for each item in
1143   // the chain, including the last (the loop exit value). This can come up from
1144   // sub, which would otherwise be treated as an add reduction. MinMax also needs
1145   // to check for a pair of icmp/select, for which we use the getNextInstruction
1146   // and isCorrectOpcode functions to step the right number of instructions, and
1147   // check the icmp/select pair.
1148   // FIXME: We also do not attempt to look through Select's yet, which might
1149   // be part of the reduction chain, or attempt to look through And's to find a
1150   // smaller bitwidth. Subs are also currently not allowed (which are usually
1151   // treated as part of an add reduction) as they are expected to generally be
1152   // more expensive than out-of-loop reductions, and need to be costed more
1153   // carefully.
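  //
  // For example (illustrative only), an smax reduction link is a phi feeding an
  // icmp/select pair, so ExpectedUses is 2 and getNextInstruction steps
  // directly to the select, skipping the cmp:
  //   %max = phi i32 [ %start, %preheader ], [ %max.next, %loop ]
  //   %cmp = icmp sgt i32 %val, %max
  //   %max.next = select i1 %cmp, i32 %val, i32 %max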
1154   unsigned ExpectedUses = 1;
1155   if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
1156     ExpectedUses = 2;
1157 
1158   auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
1159     for (auto *User : Cur->users()) {
1160       Instruction *UI = cast<Instruction>(User);
1161       if (isa<PHINode>(UI))
1162         continue;
1163       if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1164         // We are expecting an icmp/select pair, so go to the next select
1165         // instruction if we can. We already know that Cur has 2 uses.
1166         if (isa<SelectInst>(UI))
1167           return UI;
1168         continue;
1169       }
1170       return UI;
1171     }
1172     return nullptr;
1173   };
1174   auto isCorrectOpcode = [&](Instruction *Cur) {
1175     if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1176       Value *LHS, *RHS;
1177       return SelectPatternResult::isMinOrMax(
1178           matchSelectPattern(Cur, LHS, RHS).Flavor);
1179     }
1180     // Recognize a call to the llvm.fmuladd intrinsic.
1181     if (isFMulAddIntrinsic(Cur))
1182       return true;
1183 
1184     return Cur->getOpcode() == RedOp;
1185   };
1186 
1187   // Attempt to look through Phis which are part of the reduction chain
1188   unsigned ExtraPhiUses = 0;
1189   Instruction *RdxInstr = LoopExitInstr;
1190   if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
1191     if (ExitPhi->getNumIncomingValues() != 2)
1192       return {};
1193 
1194     Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
1195     Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));
1196 
1197     Instruction *Chain = nullptr;
1198     if (Inc0 == Phi)
1199       Chain = Inc1;
1200     else if (Inc1 == Phi)
1201       Chain = Inc0;
1202     else
1203       return {};
1204 
1205     RdxInstr = Chain;
1206     ExtraPhiUses = 1;
1207   }
1208 
1209   // The loop exit instruction is checked first (as a quick test) but added
1210   // last. We check that its opcode is correct (and don't allow it to be a Sub)
1211   // and that it has the expected number of uses. It will have one use from the
1212   // phi and one from an LCSSA value, no matter the type.
1213   if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
1214     return {};
1215 
1216   // Check that the Phi has one (or two for min/max) uses, plus an extra use
1217   // for conditional reductions.
1218   if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
1219     return {};
1220 
1221   Instruction *Cur = getNextInstruction(Phi);
1222 
1223   // Every other instruction in the chain should have the expected number of
1224   // uses and the correct opcode.
1225   while (Cur != RdxInstr) {
1226     if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
1227       return {};
1228 
1229     ReductionOperations.push_back(Cur);
1230     Cur = getNextInstruction(Cur);
1231   }
1232 
1233   ReductionOperations.push_back(Cur);
1234   return ReductionOperations;
1235 }
1236 
1237 InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
1238                                          const SCEV *Step, BinaryOperator *BOp,
1239                                          Type *ElementType,
1240                                          SmallVectorImpl<Instruction *> *Casts)
1241     : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp),
1242       ElementType(ElementType) {
1243   assert(IK != IK_NoInduction && "Not an induction");
1244 
1245   // Start value type should match the induction kind and the value
1246   // itself should not be null.
1247   assert(StartValue && "StartValue is null");
1248   assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
1249          "StartValue is not a pointer for pointer induction");
1250   assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
1251          "StartValue is not an integer for integer induction");
1252 
1253   // Check the Step Value. It should be a non-zero integer value.
1254   assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
1255          "Step value is zero");
1256 
1257   assert((IK != IK_PtrInduction || getConstIntStepValue()) &&
1258          "Step value should be constant for pointer induction");
1259   assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
1260          "StepValue is not an integer");
1261 
1262   assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
1263          "StepValue is not FP for FpInduction");
1264   assert((IK != IK_FpInduction ||
1265           (InductionBinOp &&
1266            (InductionBinOp->getOpcode() == Instruction::FAdd ||
1267             InductionBinOp->getOpcode() == Instruction::FSub))) &&
1268          "Binary opcode should be specified for FP induction");
1269 
1270   if (IK == IK_PtrInduction)
1271     assert(ElementType && "Pointer induction must have element type");
1272   else
1273     assert(!ElementType && "Non-pointer induction cannot have element type");
1274 
1275   if (Casts) {
1276     for (auto &Inst : *Casts) {
1277       RedundantCasts.push_back(Inst);
1278     }
1279   }
1280 }
1281 
1282 ConstantInt *InductionDescriptor::getConstIntStepValue() const {
1283   if (isa<SCEVConstant>(Step))
1284     return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
1285   return nullptr;
1286 }
1287 
1288 bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
1289                                            ScalarEvolution *SE,
1290                                            InductionDescriptor &D) {
1291 
1292   // Here we only handle FP induction variables.
1293   assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");
1294 
1295   if (TheLoop->getHeader() != Phi->getParent())
1296     return false;
1297 
1298   // The loop may have multiple entrances or multiple exits; we can analyze
1299   // this phi if it has a unique entry value and a unique backedge value.
1300   if (Phi->getNumIncomingValues() != 2)
1301     return false;
1302   Value *BEValue = nullptr, *StartValue = nullptr;
1303   if (TheLoop->contains(Phi->getIncomingBlock(0))) {
1304     BEValue = Phi->getIncomingValue(0);
1305     StartValue = Phi->getIncomingValue(1);
1306   } else {
1307     assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
1308            "Unexpected Phi node in the loop");
1309     BEValue = Phi->getIncomingValue(1);
1310     StartValue = Phi->getIncomingValue(0);
1311   }
1312 
1313   BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
1314   if (!BOp)
1315     return false;
1316 
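  // Match either %phi + %addend / %addend + %phi (fadd) or %phi - %addend
  // (fsub). For example (an illustrative sketch; the names are made up):
  //   %x    = phi float [ %init, %ph ], [ %next, %loop ]
  //   %next = fsub float %x, %step
  // Here the addend is %step; the FSub opcode is kept as the descriptor's
  // induction binary operator, so the direction of the update is preserved.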
1317   Value *Addend = nullptr;
1318   if (BOp->getOpcode() == Instruction::FAdd) {
1319     if (BOp->getOperand(0) == Phi)
1320       Addend = BOp->getOperand(1);
1321     else if (BOp->getOperand(1) == Phi)
1322       Addend = BOp->getOperand(0);
1323   } else if (BOp->getOpcode() == Instruction::FSub)
1324     if (BOp->getOperand(0) == Phi)
1325       Addend = BOp->getOperand(1);
1326 
1327   if (!Addend)
1328     return false;
1329 
1330   // The addend should be loop invariant
1331   if (auto *I = dyn_cast<Instruction>(Addend))
1332     if (TheLoop->contains(I))
1333       return false;
1334 
1335   // FP Step has unknown SCEV
1336   const SCEV *Step = SE->getUnknown(Addend);
1337   D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
1338   return true;
1339 }
1340 
1341 /// This function is called when we suspect that the update-chain of a phi node
1342 /// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts,
1343 /// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
1344 /// predicate P under which the SCEV expression for the phi can be the
1345 /// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the
1346 /// cast instructions that are involved in the update-chain of this induction.
1347 /// A caller that adds the required runtime predicate is then free to drop these
1348 /// cast instructions, and compute the phi using \p AR (instead of some scev
1349 /// expression with casts).
1350 ///
1351 /// For example, without a predicate the scev expression can take the following
1352 /// form:
1353 ///      (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
1354 ///
1355 /// It corresponds to the following IR sequence:
1356 /// %for.body:
1357 ///   %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
1358 ///   %casted_phi = "ExtTrunc i64 %x"
1359 ///   %add = add i64 %casted_phi, %step
1360 ///
1361 /// where %x is given in \p PN,
1362 /// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
1363 /// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
1364 /// several forms, for example, such as:
1365 ///   ExtTrunc1:    %casted_phi = and  %x, 2^n-1
1366 /// or:
1367 ///   ExtTrunc2:    %t = shl %x, m
1368 ///                 %casted_phi = ashr %t, m
1369 ///
1370 /// If we are able to find such sequence, we return the instructions
1371 /// we found, namely %casted_phi and the instructions on its use-def chain up
1372 /// to the phi (not including the phi).
1373 static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
1374                                     const SCEVUnknown *PhiScev,
1375                                     const SCEVAddRecExpr *AR,
1376                                     SmallVectorImpl<Instruction *> &CastInsts) {
1377 
1378   assert(CastInsts.empty() && "CastInsts is expected to be empty.");
1379   auto *PN = cast<PHINode>(PhiScev->getValue());
1380   assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
1381   const Loop *L = AR->getLoop();
1382 
1383   // Find any cast instructions that participate in the def-use chain of
1384   // PhiScev in the loop.
1385   // FORNOW/TODO: We currently expect the def-use chain to include only
1386   // two-operand instructions, where one of the operands is an invariant.
1387   // createAddRecFromPHIWithCasts() currently does not support anything more
1388   // involved than that, so we keep the search simple. This can be
1389   // extended/generalized as needed.
1390 
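  // getDef returns the operand that continues the def-use chain, i.e. the
  // non-invariant operand of a two-operand instruction. E.g. (illustrative)
  // for "%casted_phi = and i64 %x, 255" it returns %x, since the constant
  // mask is loop-invariant.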
1391   auto getDef = [&](const Value *Val) -> Value * {
1392     const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
1393     if (!BinOp)
1394       return nullptr;
1395     Value *Op0 = BinOp->getOperand(0);
1396     Value *Op1 = BinOp->getOperand(1);
1397     Value *Def = nullptr;
1398     if (L->isLoopInvariant(Op0))
1399       Def = Op1;
1400     else if (L->isLoopInvariant(Op1))
1401       Def = Op0;
1402     return Def;
1403   };
1404 
1405   // Look for the instruction that defines the induction via the
1406   // loop backedge.
1407   BasicBlock *Latch = L->getLoopLatch();
1408   if (!Latch)
1409     return false;
1410   Value *Val = PN->getIncomingValueForBlock(Latch);
1411   if (!Val)
1412     return false;
1413 
1414   // Follow the def-use chain until the induction phi is reached.
1415   // If on the way we encounter a Value that has the same SCEV Expr as the
1416   // phi node, we can consider the instructions we visit from that point
1417   // as part of the cast-sequence that can be ignored.
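  // For the ExtTrunc1 form in the comment above, the walk proceeds roughly as
  // follows (a sketch): starting from the latch value %add, %add itself is not
  // collected (its SCEV is AR advanced by one step, not AR); getDef then
  // yields %casted_phi, whose SCEV matches AR under the predicate, so it is
  // collected; getDef finally yields %x, which is PN, and the walk stops with
  // CastInsts = { %casted_phi }.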
1418   bool InCastSequence = false;
1419   auto *Inst = dyn_cast<Instruction>(Val);
1420   while (Val != PN) {
1421     // If we encountered a phi node other than PN, or if we left the loop,
1422     // we bail out.
1423     if (!Inst || !L->contains(Inst)) {
1424       return false;
1425     }
1426     auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
1427     if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
1428       InCastSequence = true;
1429     if (InCastSequence) {
1430       // Only the last instruction in the cast sequence is expected to have
1431       // uses outside the induction def-use chain.
1432       if (!CastInsts.empty())
1433         if (!Inst->hasOneUse())
1434           return false;
1435       CastInsts.push_back(Inst);
1436     }
1437     Val = getDef(Val);
1438     if (!Val)
1439       return false;
1440     Inst = dyn_cast<Instruction>(Val);
1441   }
1442 
1443   return InCastSequence;
1444 }
1445 
1446 bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
1447                                          PredicatedScalarEvolution &PSE,
1448                                          InductionDescriptor &D, bool Assume) {
1449   Type *PhiTy = Phi->getType();
1450 
1451   // Handle integer and pointer induction variables.
1452   // We now also handle FP induction, but without trying to make a
1453   // recurrent expression from the PHI node in-place.
1454 
1455   if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
1456       !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
1457     return false;
1458 
1459   if (PhiTy->isFloatingPointTy())
1460     return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);
1461 
1462   const SCEV *PhiScev = PSE.getSCEV(Phi);
1463   const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1464 
1465   // We need this expression to be an AddRecExpr.
1466   if (Assume && !AR)
1467     AR = PSE.getAsAddRec(Phi);
1468 
1469   if (!AR) {
1470     LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1471     return false;
1472   }
1473 
1474   // Record any Cast instructions that participate in the induction update
1475   const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
1476   // If we started from an UnknownSCEV, and managed to build an addRecurrence
1477   // only after enabling Assume with PSCEV, this means we may have encountered
1478   // cast instructions that required adding a runtime check in order to
1479   // guarantee the correctness of the AddRecurrence representation of the
1480   // induction.
1481   if (PhiScev != AR && SymbolicPhi) {
1482     SmallVector<Instruction *, 2> Casts;
1483     if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
1484       return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
1485   }
1486 
1487   return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
1488 }
1489 
1490 bool InductionDescriptor::isInductionPHI(
1491     PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
1492     InductionDescriptor &D, const SCEV *Expr,
1493     SmallVectorImpl<Instruction *> *CastsToIgnore) {
1494   Type *PhiTy = Phi->getType();
1495   // We only handle integer and pointer induction variables.
1496   if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
1497     return false;
1498 
1499   // Check that the PHI is consecutive.
1500   const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
1501   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1502 
1503   if (!AR) {
1504     LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1505     return false;
1506   }
1507 
1508   if (AR->getLoop() != TheLoop) {
1509     // FIXME: We should treat this as a uniform. Unfortunately, we
1510   // don't currently know how to handle uniform PHIs.
1511     LLVM_DEBUG(
1512         dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
1513     return false;
1514   }
1515 
1516   Value *StartValue =
1517       Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());
1518 
1519   BasicBlock *Latch = AR->getLoop()->getLoopLatch();
1520   if (!Latch)
1521     return false;
1522 
1523   const SCEV *Step = AR->getStepRecurrence(*SE);
1524   // Calculate the pointer stride and check if it is consecutive.
1525   // The stride may be a constant or a loop invariant integer value.
1526   const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
1527   if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
1528     return false;
1529 
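  // For example (an illustrative sketch), a header phi whose SCEV is
  // {%start,+,4}<%loop> yields an integer induction descriptor with start
  // value %start and a constant step of 4.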
1530   if (PhiTy->isIntegerTy()) {
1531     BinaryOperator *BOp =
1532         dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
1533     D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
1534                             /* ElementType */ nullptr, CastsToIgnore);
1535     return true;
1536   }
1537 
1538   assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
1539   // The step of a pointer induction should be a constant.
1540   if (!ConstStep)
1541     return false;
1542 
1543   // Always use i8 element type for opaque pointer inductions.
1544   PointerType *PtrTy = cast<PointerType>(PhiTy);
1545   Type *ElementType = PtrTy->isOpaque()
1546                           ? Type::getInt8Ty(PtrTy->getContext())
1547                           : PtrTy->getNonOpaquePointerElementType();
1548   if (!ElementType->isSized())
1549     return false;
1550 
1551   ConstantInt *CV = ConstStep->getValue();
1552   const DataLayout &DL = Phi->getModule()->getDataLayout();
1553   TypeSize TySize = DL.getTypeAllocSize(ElementType);
1554   // TODO: We could potentially support this for scalable vectors if we can
1555   // prove at compile time that the constant step is always a multiple of
1556   // the scalable type.
1557   if (TySize.isZero() || TySize.isScalable())
1558     return false;
1559 
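  // The step is converted from bytes to elements. E.g. (illustrative): a
  // non-opaque i32* induction advanced by 8 bytes per iteration gives
  // Size = 4 and CVSize = 8, so the descriptor's step is 2 elements; a step
  // that is not a multiple of the element size is rejected.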
1560   int64_t Size = static_cast<int64_t>(TySize.getFixedSize());
1561   int64_t CVSize = CV->getSExtValue();
1562   if (CVSize % Size)
1563     return false;
1564   auto *StepValue =
1565       SE->getConstant(CV->getType(), CVSize / Size, true /* signed */);
1566   D = InductionDescriptor(StartValue, IK_PtrInduction, StepValue,
1567                           /* BinOp */ nullptr, ElementType);
1568   return true;
1569 }
1570