//===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains the declarations of the Vectorization Plan base classes:
/// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
///    VPBlockBase, together implementing a Hierarchical CFG;
/// 2. Pure virtual VPRecipeBase serving as the base class for recipes contained
///    within VPBasicBlocks;
/// 3. VPInstruction, a concrete Recipe and VPUser modeling a single planned
///    instruction;
/// 4. The VPlan class holding a candidate for vectorization;
/// 5. The VPlanPrinter class providing a way to print a plan in dot format;
/// These are documented in docs/VectorizationPlan.rst.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
#define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H

#include "VPlanAnalysis.h"
#include "VPlanValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Operator.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>

namespace llvm {

class BasicBlock;
class DominatorTree;
class InnerLoopVectorizer;
class IRBuilderBase;
class LoopInfo;
class raw_ostream;
class RecurrenceDescriptor;
class SCEV;
class Type;
class VPBasicBlock;
class VPRegionBlock;
class VPlan;
class VPReplicateRecipe;
class VPlanSlp;
class Value;
class LoopVersioning;

namespace Intrinsic {
typedef unsigned ID;
}

/// Returns a calculation for the total number of elements for a given \p VF.
/// For fixed width vectors this value is a constant, whereas for scalable
/// vectors it is an expression determined at runtime.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF);

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step);
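// Illustrative sketch (not part of the API surface): the helpers above fold
// to constants for fixed VFs and emit runtime expressions for scalable ones.
// 'Builder' and 'IdxTy' are assumed to be a valid IRBuilderBase and integer
// type.
//   Value *Step = createStepForVF(Builder, IdxTy,
//                                 ElementCount::getScalable(4), -1);
//   // A fixed VF of 4 would fold to the constant -4; <vscale x 4> yields
//   // the runtime value -1 * (4 * vscale).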

const SCEV *createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE,
                                Loop *CurLoop = nullptr);

/// A range of powers-of-2 vectorization factors with fixed start and
/// adjustable end. The range includes start and excludes end, e.g.:
/// [1, 16) = {1, 2, 4, 8}
struct VFRange {
  // A power of 2.
  const ElementCount Start;

  // A power of 2. If End <= Start, the range is empty.
  ElementCount End;

  bool isEmpty() const {
    return End.getKnownMinValue() <= Start.getKnownMinValue();
  }

  VFRange(const ElementCount &Start, const ElementCount &End)
      : Start(Start), End(End) {
    assert(Start.isScalable() == End.isScalable() &&
           "Both Start and End should have the same scalable flag");
    assert(isPowerOf2_32(Start.getKnownMinValue()) &&
           "Expected Start to be a power of 2");
    assert(isPowerOf2_32(End.getKnownMinValue()) &&
           "Expected End to be a power of 2");
  }

  /// Iterator to iterate over vectorization factors in a VFRange.
  class iterator
      : public iterator_facade_base<iterator, std::forward_iterator_tag,
                                    ElementCount> {
    ElementCount VF;

  public:
    iterator(ElementCount VF) : VF(VF) {}

    bool operator==(const iterator &Other) const { return VF == Other.VF; }

    ElementCount operator*() const { return VF; }

    iterator &operator++() {
      VF *= 2;
      return *this;
    }
  };

  iterator begin() { return iterator(Start); }
  iterator end() {
    assert(isPowerOf2_32(End.getKnownMinValue()));
    return iterator(End);
  }
};
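// Illustrative use (a sketch): iterating the fixed-width range [1, 16)
// visits the VFs 1, 2, 4 and 8.
//   VFRange Range(ElementCount::getFixed(1), ElementCount::getFixed(16));
//   for (ElementCount VF : Range)
//     dbgs() << VF.getKnownMinValue() << " "; // prints: 1 2 4 8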

using VPlanPtr = std::unique_ptr<VPlan>;

/// In what follows, the term "input IR" refers to code that is fed into the
/// vectorizer whereas the term "output IR" refers to code that is generated by
/// the vectorizer.

/// VPLane provides a way to access lanes in both fixed width and scalable
/// vectors, where for the latter the lane index sometimes needs to be
/// calculated as a runtime expression.
class VPLane {
public:
  /// Kind describes how to interpret Lane.
  enum class Kind : uint8_t {
    /// For First, Lane is the index into the first N elements of a
    /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
    First,
    /// For ScalableLast, Lane is the offset from the start of the last
    /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
    /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
    /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
    ScalableLast
  };

private:
  /// in [0..VF)
  unsigned Lane;

  /// Indicates how the Lane should be interpreted, as described above.
  Kind LaneKind;

public:
  VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}

  static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }

  static VPLane getLastLaneForVF(const ElementCount &VF) {
    unsigned LaneOffset = VF.getKnownMinValue() - 1;
    Kind LaneKind;
    if (VF.isScalable())
      // In this case 'LaneOffset' refers to the offset from the start of the
      // last subvector with VF.getKnownMinValue() elements.
      LaneKind = VPLane::Kind::ScalableLast;
    else
      LaneKind = VPLane::Kind::First;
    return VPLane(LaneOffset, LaneKind);
  }

  /// Returns a compile-time known value for the lane index and asserts if the
  /// lane can only be calculated at runtime.
  unsigned getKnownLane() const {
    assert(LaneKind == Kind::First);
    return Lane;
  }

  /// Returns an expression describing the lane index that can be used at
  /// runtime.
  Value *getAsRuntimeExpr(IRBuilderBase &Builder, const ElementCount &VF) const;

  /// Returns the Kind of lane offset.
  Kind getKind() const { return LaneKind; }

  /// Returns true if this is the first lane of the whole vector.
  bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }

  /// Maps the lane to a cache index based on \p VF.
  unsigned mapToCacheIndex(const ElementCount &VF) const {
    switch (LaneKind) {
    case VPLane::Kind::ScalableLast:
      assert(VF.isScalable() && Lane < VF.getKnownMinValue());
      return VF.getKnownMinValue() + Lane;
    default:
      assert(Lane < VF.getKnownMinValue());
      return Lane;
    }
  }

  /// Returns the maximum number of lanes that we are able to consider
  /// caching for \p VF.
  static unsigned getNumCachedLanes(const ElementCount &VF) {
    return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
  }
};
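// Illustrative VPLane arithmetic (a sketch): for VF = <vscale x 4>,
// getLastLaneForVF returns {Lane = 3, Kind = ScalableLast}, whose cache index
// is VF.getKnownMinValue() + Lane = 7; getNumCachedLanes(VF) = 8, so
// First-kind lanes occupy cache slots [0, 4) and ScalableLast lanes [4, 8).
//   VPLane Last = VPLane::getLastLaneForVF(ElementCount::getScalable(4));
//   unsigned Idx = Last.mapToCacheIndex(ElementCount::getScalable(4)); // 7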

/// VPIteration represents a single point in the iteration space of the output
/// (vectorized and/or unrolled) IR loop.
struct VPIteration {
  /// in [0..UF)
  unsigned Part;

  VPLane Lane;

  VPIteration(unsigned Part, unsigned Lane,
              VPLane::Kind Kind = VPLane::Kind::First)
      : Part(Part), Lane(Lane, Kind) {}

  VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}

  bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
};
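// E.g. (a sketch): with UF = 2 and a fixed VF = 4, VPIteration(1, 2) names
// Part 1, Lane 2, i.e. the scalar instance covering original-loop element
// 1 * 4 + 2 of the unrolled-and-vectorized iteration.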

/// VPTransformState holds information passed down when "executing" a VPlan,
/// needed for generating the output IR.
struct VPTransformState {
  VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
                   DominatorTree *DT, IRBuilderBase &Builder,
                   InnerLoopVectorizer *ILV, VPlan *Plan, LLVMContext &Ctx)
      : VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan),
        LVer(nullptr), TypeAnalysis(Ctx) {}

  /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
  ElementCount VF;
  unsigned UF;

  /// Hold the indices to generate specific scalar instructions. Null indicates
  /// that all instances are to be generated, using either scalar or vector
  /// instructions.
  std::optional<VPIteration> Instance;

  struct DataState {
    /// A type for vectorized values in the new loop. Each value from the
    /// original loop, when vectorized, is represented by UF vector values in
    /// the new unrolled loop, where UF is the unroll factor.
    typedef SmallVector<Value *, 2> PerPartValuesTy;

    DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;

    using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
    DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
  } Data;

  /// Get the generated Value for a given VPValue and a given Part. Note that
  /// as some Defs are still created by ILV and managed in its ValueMap, this
  /// method will delegate the call to ILV in such cases in order to provide
  /// callers a consistent API.
  /// \see set.
  Value *get(VPValue *Def, unsigned Part);

  /// Get the generated Value for a given VPValue and given Part and Lane.
  Value *get(VPValue *Def, const VPIteration &Instance);

  bool hasVectorValue(VPValue *Def, unsigned Part) {
    auto I = Data.PerPartOutput.find(Def);
    return I != Data.PerPartOutput.end() && Part < I->second.size() &&
           I->second[Part];
  }

  bool hasScalarValue(VPValue *Def, VPIteration Instance) {
    auto I = Data.PerPartScalars.find(Def);
    if (I == Data.PerPartScalars.end())
      return false;
    unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
    return Instance.Part < I->second.size() &&
           CacheIdx < I->second[Instance.Part].size() &&
           I->second[Instance.Part][CacheIdx];
  }

  /// Set the generated Value for a given VPValue and a given Part.
  void set(VPValue *Def, Value *V, unsigned Part) {
    if (!Data.PerPartOutput.count(Def)) {
      DataState::PerPartValuesTy Entry(UF);
      Data.PerPartOutput[Def] = Entry;
    }
    Data.PerPartOutput[Def][Part] = V;
  }
  /// Reset an existing vector value for \p Def and a given \p Part.
  void reset(VPValue *Def, Value *V, unsigned Part) {
    auto Iter = Data.PerPartOutput.find(Def);
    assert(Iter != Data.PerPartOutput.end() &&
           "need to overwrite existing value");
    Iter->second[Part] = V;
  }

  /// Set the generated scalar \p V for \p Def and the given \p Instance.
  void set(VPValue *Def, Value *V, const VPIteration &Instance) {
    auto Iter = Data.PerPartScalars.insert({Def, {}});
    auto &PerPartVec = Iter.first->second;
    while (PerPartVec.size() <= Instance.Part)
      PerPartVec.emplace_back();
    auto &Scalars = PerPartVec[Instance.Part];
    unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
    while (Scalars.size() <= CacheIdx)
      Scalars.push_back(nullptr);
    assert(!Scalars[CacheIdx] && "should overwrite existing value");
    Scalars[CacheIdx] = V;
  }

  /// Reset an existing scalar value for \p Def and a given \p Instance.
  void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
    auto Iter = Data.PerPartScalars.find(Def);
    assert(Iter != Data.PerPartScalars.end() &&
           "need to overwrite existing value");
    assert(Instance.Part < Iter->second.size() &&
           "need to overwrite existing value");
    unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
    assert(CacheIdx < Iter->second[Instance.Part].size() &&
           "need to overwrite existing value");
    Iter->second[Instance.Part][CacheIdx] = V;
  }

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Set the debug location in the builder using the debug location \p DL.
  void setDebugLocFrom(DebugLoc DL);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance);

  /// Hold state information used when constructing the CFG of the output IR,
  /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
  struct CFGState {
    /// The previous VPBasicBlock visited. Initially set to null.
    VPBasicBlock *PrevVPBB = nullptr;

    /// The previous IR BasicBlock created or used. Initially set to the new
    /// header BasicBlock.
    BasicBlock *PrevBB = nullptr;

    /// The last IR BasicBlock in the output IR. Set to the exit block of the
    /// vector loop.
    BasicBlock *ExitBB = nullptr;

    /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
    /// of replication, maps the BasicBlock of the last replica created.
    SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;

    CFGState() = default;

    /// Returns the BasicBlock* mapped to the pre-header of the loop region
    /// containing \p R.
    BasicBlock *getPreheaderBBFor(VPRecipeBase *R);
  } CFG;

  /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
  LoopInfo *LI;

  /// Hold a pointer to the Dominator Tree to register new basic blocks in the
  /// loop.
  DominatorTree *DT;

  /// Hold a reference to the IRBuilder used to generate output IR code.
  IRBuilderBase &Builder;

  VPValue2ValueTy VPValue2Value;

  /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
  Value *CanonicalIV = nullptr;

  /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
  InnerLoopVectorizer *ILV;

  /// Pointer to the VPlan the code is generated for.
  VPlan *Plan;

  /// The loop object for the current parent region, or nullptr.
  Loop *CurrentVectorLoop = nullptr;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  LoopVersioning *LVer = nullptr;

  /// Map SCEVs to their expanded values. Populated when executing
  /// VPExpandSCEVRecipes.
  DenseMap<const SCEV *, Value *> ExpandedSCEVs;

  /// VPlan-based type analysis.
  VPTypeAnalysis TypeAnalysis;
};
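// Illustrative recipe-side use of VPTransformState, as it might appear inside
// some recipe's execute(State) (a sketch; 'Def' and 'A' are assumed to be
// VPValues defined by and used by that recipe):
//   for (unsigned Part = 0; Part < State.UF; ++Part) {
//     Value *Op = State.get(A, Part);            // per-part vector operand
//     Value *Res = State.Builder.CreateFNeg(Op); // emit output IR
//     State.set(Def, Res, Part);                 // record the per-part result
//   }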

/// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
/// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
class VPBlockBase {
  friend class VPBlockUtils;

  const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).

  /// An optional name for the block.
  std::string Name;

  /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
  /// it is a topmost VPBlockBase.
  VPRegionBlock *Parent = nullptr;

  /// List of predecessor blocks.
  SmallVector<VPBlockBase *, 1> Predecessors;

  /// List of successor blocks.
  SmallVector<VPBlockBase *, 1> Successors;

  /// VPlan containing the block. Can only be set on the entry block of the
  /// plan.
  VPlan *Plan = nullptr;

  /// Add \p Successor as the last successor to this block.
  void appendSuccessor(VPBlockBase *Successor) {
    assert(Successor && "Cannot add nullptr successor!");
    Successors.push_back(Successor);
  }

  /// Add \p Predecessor as the last predecessor to this block.
  void appendPredecessor(VPBlockBase *Predecessor) {
    assert(Predecessor && "Cannot add nullptr predecessor!");
    Predecessors.push_back(Predecessor);
  }

  /// Remove \p Predecessor from the predecessors of this block.
  void removePredecessor(VPBlockBase *Predecessor) {
    auto Pos = find(Predecessors, Predecessor);
    assert(Pos && "Predecessor does not exist");
    Predecessors.erase(Pos);
  }

  /// Remove \p Successor from the successors of this block.
  void removeSuccessor(VPBlockBase *Successor) {
    auto Pos = find(Successors, Successor);
    assert(Pos && "Successor does not exist");
    Successors.erase(Pos);
  }

protected:
  VPBlockBase(const unsigned char SC, const std::string &N)
      : SubclassID(SC), Name(N) {}

public:
  /// An enumeration for keeping track of the concrete subclasses of
  /// VPBlockBase that are actually instantiated. Values of this enumeration
  /// are kept in the SubclassID field of the VPBlockBase objects. They are
  /// used for concrete type identification.
  using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };

  using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;

  virtual ~VPBlockBase() = default;

  const std::string &getName() const { return Name; }

  void setName(const Twine &newName) { Name = newName.str(); }

  /// \return an ID for the concrete type of this object.
  /// This is used to implement the classof checks. This should not be used
  /// for any other purpose, as the values may change as LLVM evolves.
  unsigned getVPBlockID() const { return SubclassID; }

  VPRegionBlock *getParent() { return Parent; }
  const VPRegionBlock *getParent() const { return Parent; }

  /// \return A pointer to the plan containing the current block.
  VPlan *getPlan();
  const VPlan *getPlan() const;

  /// Sets the pointer of the plan containing the block. The block must be the
  /// entry block into the VPlan.
  void setPlan(VPlan *ParentPlan);

  void setParent(VPRegionBlock *P) { Parent = P; }

  /// \return the VPBasicBlock that is the entry of this VPBlockBase,
  /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
  /// VPBlockBase is a VPBasicBlock, it is returned.
  const VPBasicBlock *getEntryBasicBlock() const;
  VPBasicBlock *getEntryBasicBlock();

  /// \return the VPBasicBlock that is the exiting block of this VPBlockBase,
  /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
  /// VPBlockBase is a VPBasicBlock, it is returned.
  const VPBasicBlock *getExitingBasicBlock() const;
  VPBasicBlock *getExitingBasicBlock();

  const VPBlocksTy &getSuccessors() const { return Successors; }
  VPBlocksTy &getSuccessors() { return Successors; }

  iterator_range<VPBlockBase **> successors() { return Successors; }

  const VPBlocksTy &getPredecessors() const { return Predecessors; }
  VPBlocksTy &getPredecessors() { return Predecessors; }

  /// \return the successor of this VPBlockBase if it has a single successor.
  /// Otherwise return a null pointer.
  VPBlockBase *getSingleSuccessor() const {
    return (Successors.size() == 1 ? *Successors.begin() : nullptr);
  }

  /// \return the predecessor of this VPBlockBase if it has a single
  /// predecessor. Otherwise return a null pointer.
  VPBlockBase *getSinglePredecessor() const {
    return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
  }

  size_t getNumSuccessors() const { return Successors.size(); }
  size_t getNumPredecessors() const { return Predecessors.size(); }

  /// An Enclosing Block of a block B is any block containing B, including B
  /// itself. \return the closest enclosing block starting from "this", which
  /// has successors. \return the root enclosing block if all enclosing blocks
  /// have no successors.
  VPBlockBase *getEnclosingBlockWithSuccessors();

  /// \return the closest enclosing block starting from "this", which has
  /// predecessors. \return the root enclosing block if all enclosing blocks
  /// have no predecessors.
  VPBlockBase *getEnclosingBlockWithPredecessors();

  /// \return the successors either attached directly to this VPBlockBase or, if
  /// this VPBlockBase is the exit block of a VPRegionBlock and has no
  /// successors of its own, search recursively for the first enclosing
  /// VPRegionBlock that has successors and return them. If no such
  /// VPRegionBlock exists, return the (empty) successors of the topmost
  /// VPBlockBase reached.
  const VPBlocksTy &getHierarchicalSuccessors() {
    return getEnclosingBlockWithSuccessors()->getSuccessors();
  }

  /// \return the hierarchical successor of this VPBlockBase if it has a single
  /// hierarchical successor. Otherwise return a null pointer.
  VPBlockBase *getSingleHierarchicalSuccessor() {
    return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
  }

  /// \return the predecessors either attached directly to this VPBlockBase or,
  /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
  /// predecessors of its own, search recursively for the first enclosing
  /// VPRegionBlock that has predecessors and return them. If no such
  /// VPRegionBlock exists, return the (empty) predecessors of the topmost
  /// VPBlockBase reached.
  const VPBlocksTy &getHierarchicalPredecessors() {
    return getEnclosingBlockWithPredecessors()->getPredecessors();
  }

  /// \return the hierarchical predecessor of this VPBlockBase if it has a
  /// single hierarchical predecessor. Otherwise return a null pointer.
  VPBlockBase *getSingleHierarchicalPredecessor() {
    return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
  }

  /// Set a given VPBlockBase \p Successor as the single successor of this
  /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
  /// This VPBlockBase must have no successors.
  void setOneSuccessor(VPBlockBase *Successor) {
    assert(Successors.empty() && "Setting one successor when others exist.");
    assert(Successor->getParent() == getParent() &&
           "connected blocks must have the same parent");
    appendSuccessor(Successor);
  }

  /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
  /// successors of this VPBlockBase. This VPBlockBase is not added as
  /// predecessor of \p IfTrue or \p IfFalse. This VPBlockBase must have no
  /// successors.
  void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse) {
    assert(Successors.empty() && "Setting two successors when others exist.");
    appendSuccessor(IfTrue);
    appendSuccessor(IfFalse);
  }

  /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
  /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
  /// as successor of any VPBasicBlock in \p NewPreds.
  void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
    assert(Predecessors.empty() && "Block predecessors already set.");
    for (auto *Pred : NewPreds)
      appendPredecessor(Pred);
  }

  /// Remove all the predecessors of this block.
  void clearPredecessors() { Predecessors.clear(); }

  /// Remove all the successors of this block.
  void clearSuccessors() { Successors.clear(); }

  /// The method which generates the output IR that corresponds to this
  /// VPBlockBase, thereby "executing" the VPlan.
  virtual void execute(VPTransformState *State) = 0;

  /// Delete all blocks reachable from a given VPBlockBase, inclusive.
  static void deleteCFG(VPBlockBase *Entry);

  /// Return true if it is legal to hoist instructions into this block.
  bool isLegalToHoistInto() {
    // There are currently no constraints that prevent an instruction from
    // being hoisted into a VPBlockBase.
    return true;
  }

  /// Replace all operands of VPUsers in the block with \p NewValue and also
  /// replaces all uses of VPValues defined in the block with NewValue.
  virtual void dropAllReferences(VPValue *NewValue) = 0;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void printAsOperand(raw_ostream &OS, bool PrintType) const {
    OS << getName();
  }

  /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
  /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  virtual void print(raw_ostream &O, const Twine &Indent,
                     VPSlotTracker &SlotTracker) const = 0;

  /// Print plain-text dump of this VPlan to \p O.
  void print(raw_ostream &O) const {
    VPSlotTracker SlotTracker(getPlan());
    print(O, "", SlotTracker);
  }

  /// Print the successors of this block to \p O, prefixing all lines with \p
  /// Indent.
  void printSuccessors(raw_ostream &O, const Twine &Indent) const;

  /// Dump this VPBlockBase to dbgs().
  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
};
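// Illustrative hierarchical-CFG wiring (a sketch; the blocks are assumed to
// already exist and to share the same parent region). Note the asymmetry:
// setting successor edges does not implicitly register predecessor edges.
//   Entry->setTwoSuccessors(Then, Else);
//   Then->setPredecessors({Entry});
//   Else->setPredecessors({Entry});
//   Merge->setPredecessors({Then, Else});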

/// A value that is used outside the VPlan. The operand of the user needs to be
/// added to the associated LCSSA phi node.
class VPLiveOut : public VPUser {
  PHINode *Phi;

public:
  VPLiveOut(PHINode *Phi, VPValue *Op)
      : VPUser({Op}, VPUser::VPUserID::LiveOut), Phi(Phi) {}

  static inline bool classof(const VPUser *U) {
    return U->getVPUserID() == VPUser::VPUserID::LiveOut;
  }

  /// Fixup the wrapped LCSSA phi node in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixPhi(VPlan &Plan, VPTransformState &State);

  /// Returns true if the VPLiveOut uses scalars of operand \p Op.
  bool usesScalars(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return true;
  }

  PHINode *getPhi() const { return Phi; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPLiveOut to \p O.
  void print(raw_ostream &O, VPSlotTracker &SlotTracker) const;
#endif
};

/// VPRecipeBase is a base class modeling a sequence of one or more output IR
/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
                     public VPDef,
                     public VPUser {
  friend VPBasicBlock;
  friend class VPBlockUtils;

  /// Each VPRecipe belongs to a single VPBasicBlock.
  VPBasicBlock *Parent = nullptr;

  /// The debug location for the recipe.
  DebugLoc DL;

public:
  VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands,
               DebugLoc DL = {})
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe), DL(DL) {}

  template <typename IterT>
  VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands,
               DebugLoc DL = {})
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe), DL(DL) {}
  virtual ~VPRecipeBase() = default;

  /// \return the VPBasicBlock which this VPRecipe belongs to.
  VPBasicBlock *getParent() { return Parent; }
  const VPBasicBlock *getParent() const { return Parent; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRecipe, thereby "executing" the VPlan.
  virtual void execute(VPTransformState &State) = 0;

  /// Insert an unlinked recipe into a basic block immediately before
  /// the specified recipe.
  void insertBefore(VPRecipeBase *InsertPos);
  /// Insert an unlinked recipe into \p BB immediately before the insertion
  /// point \p IP.
  void insertBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator IP);

  /// Insert an unlinked Recipe into a basic block immediately after
  /// the specified Recipe.
  void insertAfter(VPRecipeBase *InsertPos);

  /// Unlink this recipe from its current VPBasicBlock and insert it into
  /// the VPBasicBlock that MovePos lives in, right after MovePos.
  void moveAfter(VPRecipeBase *MovePos);

  /// Unlink this recipe and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  iplist<VPRecipeBase>::iterator eraseFromParent();

  /// Returns the underlying instruction if the recipe is a VPValue, or nullptr
  /// otherwise.
  Instruction *getUnderlyingInstr() {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }
  const Instruction *getUnderlyingInstr() const {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    // All VPDefs are also VPRecipeBases.
    return true;
  }

  static inline bool classof(const VPUser *U) {
    return U->getVPUserID() == VPUser::VPUserID::Recipe;
  }

  /// Returns true if the recipe may have side-effects.
  bool mayHaveSideEffects() const;

  /// Returns true for PHI-like recipes.
  bool isPhi() const {
    return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
  }

  /// Returns true if the recipe may read from memory.
  bool mayReadFromMemory() const;

  /// Returns true if the recipe may write to memory.
  bool mayWriteToMemory() const;

  /// Returns true if the recipe may read from or write to memory.
  bool mayReadOrWriteMemory() const {
    return mayReadFromMemory() || mayWriteToMemory();
  }

  /// Returns the debug location of the recipe.
  DebugLoc getDebugLoc() const { return DL; }
};
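// Illustrative recipe list manipulation (a sketch; 'R' and 'Anchor' are
// assumed to be recipes in some VPBasicBlock):
//   R->removeFromParent();  // unlink but keep the recipe alive
//   R->insertAfter(Anchor); // relink it right after Anchor
//   if (!R->mayHaveSideEffects())
//     R->eraseFromParent(); // unlink and delete in one step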

// Helper macro to define common classof implementations for recipes.
#define VP_CLASSOF_IMPL(VPDefID)                                               \
  static inline bool classof(const VPDef *D) {                                 \
    return D->getVPDefID() == VPDefID;                                         \
  }                                                                            \
  static inline bool classof(const VPValue *V) {                               \
    auto *R = V->getDefiningRecipe();                                          \
    return R && R->getVPDefID() == VPDefID;                                    \
  }                                                                            \
  static inline bool classof(const VPUser *U) {                                \
    auto *R = dyn_cast<VPRecipeBase>(U);                                       \
    return R && R->getVPDefID() == VPDefID;                                    \
  }                                                                            \
  static inline bool classof(const VPRecipeBase *R) {                          \
    return R->getVPDefID() == VPDefID;                                         \
  }
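// The macro expands to classof overloads so generic code can use isa<> and
// dyn_cast<> across the VPDef/VPValue/VPUser/VPRecipeBase views uniformly,
// e.g. (a sketch, using a recipe class declared further below):
//   if (auto *Widen = dyn_cast<VPWidenRecipe>(&R))
//     unsigned Opc = Widen->getOpcode();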

/// Class to record LLVM IR flags for a recipe along with it.
class VPRecipeWithIRFlags : public VPRecipeBase {
  enum class OperationType : unsigned char {
    Cmp,
    OverflowingBinOp,
    DisjointOp,
    PossiblyExactOp,
    GEPOp,
    FPMathOp,
    NonNegOp,
    Other
  };

public:
  struct WrapFlagsTy {
    char HasNUW : 1;
    char HasNSW : 1;

    WrapFlagsTy(bool HasNUW, bool HasNSW) : HasNUW(HasNUW), HasNSW(HasNSW) {}
  };

protected:
  struct GEPFlagsTy {
    char IsInBounds : 1;
    GEPFlagsTy(bool IsInBounds) : IsInBounds(IsInBounds) {}
  };

private:
  struct DisjointFlagsTy {
    char IsDisjoint : 1;
  };
  struct ExactFlagsTy {
    char IsExact : 1;
  };
  struct NonNegFlagsTy {
    char NonNeg : 1;
  };
  struct FastMathFlagsTy {
    char AllowReassoc : 1;
    char NoNaNs : 1;
    char NoInfs : 1;
    char NoSignedZeros : 1;
    char AllowReciprocal : 1;
    char AllowContract : 1;
    char ApproxFunc : 1;

    FastMathFlagsTy(const FastMathFlags &FMF);
  };

  OperationType OpType;

  union {
    CmpInst::Predicate CmpPredicate;
    WrapFlagsTy WrapFlags;
    DisjointFlagsTy DisjointFlags;
    ExactFlagsTy ExactFlags;
    GEPFlagsTy GEPFlags;
    NonNegFlagsTy NonNegFlags;
    FastMathFlagsTy FMFs;
    unsigned AllFlags;
  };

public:
  template <typename IterT>
  VPRecipeWithIRFlags(const unsigned char SC, IterT Operands, DebugLoc DL = {})
      : VPRecipeBase(SC, Operands, DL) {
    OpType = OperationType::Other;
    AllFlags = 0;
  }

  template <typename IterT>
  VPRecipeWithIRFlags(const unsigned char SC, IterT Operands, Instruction &I)
      : VPRecipeWithIRFlags(SC, Operands, I.getDebugLoc()) {
    if (auto *Op = dyn_cast<CmpInst>(&I)) {
      OpType = OperationType::Cmp;
      CmpPredicate = Op->getPredicate();
    } else if (auto *Op = dyn_cast<PossiblyDisjointInst>(&I)) {
      OpType = OperationType::DisjointOp;
      DisjointFlags.IsDisjoint = Op->isDisjoint();
    } else if (auto *Op = dyn_cast<OverflowingBinaryOperator>(&I)) {
      OpType = OperationType::OverflowingBinOp;
      WrapFlags = {Op->hasNoUnsignedWrap(), Op->hasNoSignedWrap()};
    } else if (auto *Op = dyn_cast<PossiblyExactOperator>(&I)) {
      OpType = OperationType::PossiblyExactOp;
      ExactFlags.IsExact = Op->isExact();
    } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      OpType = OperationType::GEPOp;
      GEPFlags.IsInBounds = GEP->isInBounds();
    } else if (auto *PNNI = dyn_cast<PossiblyNonNegInst>(&I)) {
      OpType = OperationType::NonNegOp;
      NonNegFlags.NonNeg = PNNI->hasNonNeg();
    } else if (auto *Op = dyn_cast<FPMathOperator>(&I)) {
      OpType = OperationType::FPMathOp;
      FMFs = Op->getFastMathFlags();
    }
  }

  template <typename IterT>
  VPRecipeWithIRFlags(const unsigned char SC, IterT Operands,
                      CmpInst::Predicate Pred, DebugLoc DL = {})
      : VPRecipeBase(SC, Operands, DL), OpType(OperationType::Cmp),
        CmpPredicate(Pred) {}

  template <typename IterT>
  VPRecipeWithIRFlags(const unsigned char SC, IterT Operands,
                      WrapFlagsTy WrapFlags, DebugLoc DL = {})
      : VPRecipeBase(SC, Operands, DL), OpType(OperationType::OverflowingBinOp),
        WrapFlags(WrapFlags) {}

  template <typename IterT>
  VPRecipeWithIRFlags(const unsigned char SC, IterT Operands,
                      FastMathFlags FMFs, DebugLoc DL = {})
      : VPRecipeBase(SC, Operands, DL), OpType(OperationType::FPMathOp),
        FMFs(FMFs) {}

protected:
  template <typename IterT>
  VPRecipeWithIRFlags(const unsigned char SC, IterT Operands,
                      GEPFlagsTy GEPFlags, DebugLoc DL = {})
      : VPRecipeBase(SC, Operands, DL), OpType(OperationType::GEPOp),
        GEPFlags(GEPFlags) {}

public:
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC ||
           R->getVPDefID() == VPRecipeBase::VPWidenSC ||
           R->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
           R->getVPDefID() == VPRecipeBase::VPWidenCastSC ||
           R->getVPDefID() == VPRecipeBase::VPReplicateSC ||
           R->getVPDefID() == VPRecipeBase::VPVectorPointerSC;
  }

  /// Drop all poison-generating flags.
  void dropPoisonGeneratingFlags() {
    // NOTE: This needs to be kept in-sync with
    // Instruction::dropPoisonGeneratingFlags.
    switch (OpType) {
    case OperationType::OverflowingBinOp:
      WrapFlags.HasNUW = false;
      WrapFlags.HasNSW = false;
      break;
    case OperationType::DisjointOp:
      DisjointFlags.IsDisjoint = false;
      break;
    case OperationType::PossiblyExactOp:
      ExactFlags.IsExact = false;
      break;
    case OperationType::GEPOp:
      GEPFlags.IsInBounds = false;
      break;
    case OperationType::FPMathOp:
      FMFs.NoNaNs = false;
      FMFs.NoInfs = false;
      break;
    case OperationType::NonNegOp:
      NonNegFlags.NonNeg = false;
      break;
    case OperationType::Cmp:
    case OperationType::Other:
      break;
    }
  }

  /// Set the IR flags for \p I.
  void setFlags(Instruction *I) const {
    switch (OpType) {
    case OperationType::OverflowingBinOp:
      I->setHasNoUnsignedWrap(WrapFlags.HasNUW);
      I->setHasNoSignedWrap(WrapFlags.HasNSW);
      break;
    case OperationType::DisjointOp:
      cast<PossiblyDisjointInst>(I)->setIsDisjoint(DisjointFlags.IsDisjoint);
      break;
    case OperationType::PossiblyExactOp:
      I->setIsExact(ExactFlags.IsExact);
      break;
    case OperationType::GEPOp:
      cast<GetElementPtrInst>(I)->setIsInBounds(GEPFlags.IsInBounds);
      break;
    case OperationType::FPMathOp:
      I->setHasAllowReassoc(FMFs.AllowReassoc);
      I->setHasNoNaNs(FMFs.NoNaNs);
      I->setHasNoInfs(FMFs.NoInfs);
      I->setHasNoSignedZeros(FMFs.NoSignedZeros);
      I->setHasAllowReciprocal(FMFs.AllowReciprocal);
      I->setHasAllowContract(FMFs.AllowContract);
      I->setHasApproxFunc(FMFs.ApproxFunc);
      break;
    case OperationType::NonNegOp:
      I->setNonNeg(NonNegFlags.NonNeg);
      break;
    case OperationType::Cmp:
    case OperationType::Other:
      break;
    }
  }

  CmpInst::Predicate getPredicate() const {
    assert(OpType == OperationType::Cmp &&
           "recipe doesn't have a compare predicate");
    return CmpPredicate;
  }

  bool isInBounds() const {
    assert(OpType == OperationType::GEPOp &&
           "recipe doesn't have inbounds flag");
    return GEPFlags.IsInBounds;
  }

  /// Returns true if the recipe has fast-math flags.
  bool hasFastMathFlags() const { return OpType == OperationType::FPMathOp; }

  FastMathFlags getFastMathFlags() const;

  bool hasNoUnsignedWrap() const {
    assert(OpType == OperationType::OverflowingBinOp &&
           "recipe doesn't have a NUW flag");
    return WrapFlags.HasNUW;
  }

  bool hasNoSignedWrap() const {
    assert(OpType == OperationType::OverflowingBinOp &&
           "recipe doesn't have a NSW flag");
    return WrapFlags.HasNSW;
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void printFlags(raw_ostream &O) const;
#endif
};
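// Illustrative flag handling (a sketch; 'R' is assumed to wrap an overflowing
// binary op and 'NewI' to be the IR instruction generated for it):
//   if (R.hasNoUnsignedWrap() || R.hasNoSignedWrap())
//     R.dropPoisonGeneratingFlags(); // e.g. before speculative hoisting
//   R.setFlags(NewI);                // transfer the remaining flags to IR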

/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While, as with any Recipe, it may generate a sequence of IR instructions
/// when executed, these instructions would always form a single-def expression
/// as the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeWithIRFlags, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
  enum {
    FirstOrderRecurrenceSplice =
        Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
                                      // values of a first-order recurrence.
    Not,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
    CalculateTripCountMinusVF,
    // Increment the canonical IV separately for each unrolled part.
    CanonicalIVIncrementForPart,
    BranchOnCount,
    BranchOnCond,
    ComputeReductionResult,
  };

private:
  typedef unsigned char OpcodeTy;
  OpcodeTy Opcode;

  /// An optional name that can be used for the generated IR instruction.
  const std::string Name;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction. \returns the generated value for \p Part.
  /// In some cases an existing value is returned rather than a generated
  /// one.
  Value *generateInstruction(VPTransformState &State, unsigned Part);

#if !defined(NDEBUG)
  /// Return true if the VPInstruction is a floating point math operation, i.e.
  /// has fast-math flags.
  bool isFPMathOp() const;
#endif

protected:
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands, DebugLoc DL,
                const Twine &Name = "")
      : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, DL),
        VPValue(this), Opcode(Opcode), Name(Name.str()) {}

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
                DebugLoc DL = {}, const Twine &Name = "")
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands), DL, Name) {}

  VPInstruction(unsigned Opcode, CmpInst::Predicate Pred, VPValue *A,
                VPValue *B, DebugLoc DL = {}, const Twine &Name = "");

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
                WrapFlagsTy WrapFlags, DebugLoc DL = {}, const Twine &Name = "")
      : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, WrapFlags, DL),
        VPValue(this), Opcode(Opcode), Name(Name.str()) {}

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
                FastMathFlags FMFs, DebugLoc DL = {}, const Twine &Name = "");

  VP_CLASSOF_IMPL(VPDef::VPInstructionSC)

  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    //       modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
    case VPInstruction::BranchOnCond:
    case VPInstruction::BranchOnCount:
      return false;
    default:
      return true;
    }
  }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    if (getOperand(0) != Op)
      return false;
    switch (getOpcode()) {
    default:
      return false;
    case VPInstruction::ActiveLaneMask:
    case VPInstruction::CalculateTripCountMinusVF:
    case VPInstruction::CanonicalIVIncrementForPart:
    case VPInstruction::BranchOnCount:
      return true;
    };
    llvm_unreachable("switch should return");
  }

  /// Returns true if the recipe only uses the first part of operand \p Op.
  bool onlyFirstPartUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    if (getOperand(0) != Op)
      return false;
    switch (getOpcode()) {
    default:
      return false;
    case VPInstruction::BranchOnCount:
      return true;
    };
    llvm_unreachable("switch should return");
  }
};
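// Illustrative VPInstruction creation (a sketch; 'IV' and 'TC' are assumed to
// be VPValues for the canonical IV and the vector trip count):
//   auto *Cmp = new VPInstruction(Instruction::ICmp, CmpInst::ICMP_EQ, IV, TC);
//   auto *Br = new VPInstruction(VPInstruction::BranchOnCond, {Cmp});
//   // Cmp->hasResult() == true; Br->hasResult() == false.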

/// VPWidenRecipe is a recipe for producing a widened (vector-type) copy of its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeWithIRFlags, public VPValue {
  unsigned Opcode;

public:
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeWithIRFlags(VPDef::VPWidenSC, Operands, I), VPValue(this, &I),
        Opcode(I.getOpcode()) {}

  ~VPWidenRecipe() override = default;

  VP_CLASSOF_IMPL(VPDef::VPWidenSC)

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

  unsigned getOpcode() const { return Opcode; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};

/// VPWidenCastRecipe is a recipe to create vector cast instructions.
class VPWidenCastRecipe : public VPRecipeWithIRFlags, public VPValue {
  /// Cast instruction opcode.
  Instruction::CastOps Opcode;

  /// Result type for the cast.
  Type *ResultTy;

public:
  VPWidenCastRecipe(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy,
                    CastInst &UI)
      : VPRecipeWithIRFlags(VPDef::VPWidenCastSC, Op, UI), VPValue(this, &UI),
        Opcode(Opcode), ResultTy(ResultTy) {
    assert(UI.getOpcode() == Opcode &&
           "opcode of underlying cast doesn't match");
    assert(UI.getType() == ResultTy &&
           "result type of underlying cast doesn't match");
  }

  VPWidenCastRecipe(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy)
      : VPRecipeWithIRFlags(VPDef::VPWidenCastSC, Op), VPValue(this, nullptr),
        Opcode(Opcode), ResultTy(ResultTy) {}

  ~VPWidenCastRecipe() override = default;

  VP_CLASSOF_IMPL(VPDef::VPWidenCastSC)

  /// Produce widened copies of the cast.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  Instruction::CastOps getOpcode() const { return Opcode; }

  /// Returns the result type of the cast.
  Type *getResultType() const { return ResultTy; }
};

/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {
  /// ID of the vector intrinsic to call when widening the call. If set to
  /// Intrinsic::not_intrinsic, a library call will be used instead.
  Intrinsic::ID VectorIntrinsicID;
  /// If this recipe represents a library call, Variant stores a pointer to
  /// the chosen function. There is a 1:1 mapping between a given VF and the
  /// chosen vectorized variant, so there will be a different vplan for each
  /// VF with a valid variant.
  Function *Variant;

public:
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments,
                    Intrinsic::ID VectorIntrinsicID,
                    Function *Variant = nullptr)
      : VPRecipeBase(VPDef::VPWidenCallSC, CallArguments), VPValue(this, &I),
        VectorIntrinsicID(VectorIntrinsicID), Variant(Variant) {}

  ~VPWidenCallRecipe() override = default;

  VP_CLASSOF_IMPL(VPDef::VPWidenCallSC)

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};

/// A recipe for widening select instructions.
struct VPWidenSelectRecipe : public VPRecipeBase, public VPValue {
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPDef::VPWidenSelectSC, Operands, I.getDebugLoc()),
        VPValue(this, &I) {}

  ~VPWidenSelectRecipe() override = default;

  VP_CLASSOF_IMPL(VPDef::VPWidenSelectSC)

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  VPValue *getCond() const {
    return getOperand(0);
  }

  bool isInvariantCond() const {
    return getCond()->isDefinedOutsideVectorRegions();
  }
};

/// A recipe for handling GEP instructions.
class VPWidenGEPRecipe : public VPRecipeWithIRFlags, public VPValue {
  bool isPointerLoopInvariant() const {
    return getOperand(0)->isDefinedOutsideVectorRegions();
  }

  bool isIndexLoopInvariant(unsigned I) const {
    return getOperand(I + 1)->isDefinedOutsideVectorRegions();
  }

  bool areAllOperandsInvariant() const {
    return all_of(operands(), [](VPValue *Op) {
      return Op->isDefinedOutsideVectorRegions();
    });
  }

public:
  template <typename IterT>
  VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
      : VPRecipeWithIRFlags(VPDef::VPWidenGEPSC, Operands, *GEP),
        VPValue(this, GEP) {}

  ~VPWidenGEPRecipe() override = default;

  VP_CLASSOF_IMPL(VPDef::VPWidenGEPSC)

  /// Generate the gep nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};

/// A recipe to compute the pointers for widened memory accesses of IndexedTy
/// for all parts. If IsReverse is true, compute pointers for accessing the
/// input in reverse order per part.
class VPVectorPointerRecipe : public VPRecipeWithIRFlags, public VPValue {
  Type *IndexedTy;
  bool IsReverse;

public:
  VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, bool IsReverse,
                        bool IsInBounds, DebugLoc DL)
      : VPRecipeWithIRFlags(VPDef::VPVectorPointerSC, ArrayRef<VPValue *>(Ptr),
                            GEPFlagsTy(IsInBounds), DL),
        VPValue(this), IndexedTy(IndexedTy), IsReverse(IsReverse) {}

  VP_CLASSOF_IMPL(VPDef::VPVectorPointerSC)

  void execute(VPTransformState &State) override;

  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return true;
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1403 
1404 /// A pure virtual base class for all recipes modeling header phis, including
1405 /// phis for first order recurrences, pointer inductions and reductions. The
1406 /// start value is the first operand of the recipe and the incoming value from
1407 /// the backedge is the second operand.
1408 ///
1409 /// Inductions are modeled using the following sub-classes:
1410 ///  * VPCanonicalIVPHIRecipe: Canonical scalar induction of the vector loop,
1411 ///    starting at a specified value (zero for the main vector loop, the resume
1412 ///    value for the epilogue vector loop) and stepping by 1. The induction
1413 ///    controls exiting of the vector loop by comparing against the vector trip
1414 ///    count. Produces a single scalar PHI for the induction value per
1415 ///    iteration.
1416 ///  * VPWidenIntOrFpInductionRecipe: Generates vector values for integer and
1417 ///    floating point inductions with arbitrary start and step values. Produces
1418 ///    a vector PHI per-part.
1419 ///  * VPDerivedIVRecipe: Converts the canonical IV value to the corresponding
1420 ///    value of an IV with different start and step values. Produces a single
1421 ///    scalar value per iteration.
1422 ///  * VPScalarIVStepsRecipe: Generates scalar values per-lane based on a
1423 ///    canonical or derived induction.
1424 ///  * VPWidenPointerInductionRecipe: Generate vector and scalar values for a
1425 ///    pointer induction. Produces either a vector PHI per-part or scalar values
1426 ///    per-lane based on the canonical induction.
1427 class VPHeaderPHIRecipe : public VPRecipeBase, public VPValue {
1428 protected:
1429   VPHeaderPHIRecipe(unsigned char VPDefID, Instruction *UnderlyingInstr,
1430                     VPValue *Start = nullptr, DebugLoc DL = {})
1431       : VPRecipeBase(VPDefID, {}, DL), VPValue(this, UnderlyingInstr) {
1432     if (Start)
1433       addOperand(Start);
1434   }
1435 
1436 public:
1437   ~VPHeaderPHIRecipe() override = default;
1438 
1439   /// Method to support type inquiry through isa, cast, and dyn_cast.
1440   static inline bool classof(const VPRecipeBase *B) {
1441     return B->getVPDefID() >= VPDef::VPFirstHeaderPHISC &&
1442            B->getVPDefID() <= VPDef::VPLastHeaderPHISC;
1443   }
1444   static inline bool classof(const VPValue *V) {
1445     auto *B = V->getDefiningRecipe();
1446     return B && B->getVPDefID() >= VPRecipeBase::VPFirstHeaderPHISC &&
1447            B->getVPDefID() <= VPRecipeBase::VPLastHeaderPHISC;
1448   }
1449 
1450   /// Generate the phi nodes.
1451   void execute(VPTransformState &State) override = 0;
1452 
1453 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1454   /// Print the recipe.
1455   void print(raw_ostream &O, const Twine &Indent,
1456              VPSlotTracker &SlotTracker) const override = 0;
1457 #endif
1458 
1459   /// Returns the start value of the phi, if one is set.
1460   VPValue *getStartValue() {
1461     return getNumOperands() == 0 ? nullptr : getOperand(0);
1462   }
1463   VPValue *getStartValue() const {
1464     return getNumOperands() == 0 ? nullptr : getOperand(0);
1465   }
1466 
1467   /// Update the start value of the recipe.
1468   void setStartValue(VPValue *V) { setOperand(0, V); }
1469 
1470   /// Returns the incoming value from the loop backedge.
1471   virtual VPValue *getBackedgeValue() {
1472     return getOperand(1);
1473   }
1474 
1475   /// Returns the backedge value as a recipe. The backedge value is guaranteed
1476   /// to be a recipe.
1477   virtual VPRecipeBase &getBackedgeRecipe() {
1478     return *getBackedgeValue()->getDefiningRecipe();
1479   }
1480 };
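
// Operand-convention sketch for the class above (illustrative): for any
// concrete header-phi recipe R, operand 0 is the start value and operand 1,
// once wired up by the plan transforms, the value incoming from the backedge:
//
//   VPValue *Start = R.getStartValue();          // operand 0, nullptr if unset
//   VPValue *Back  = R.getBackedgeValue();       // operand 1
//   VPRecipeBase &BackR = R.getBackedgeRecipe(); // defining recipe of Back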
1481 
1482 /// A recipe for handling phi nodes of integer and floating-point inductions,
1483 /// producing their vector values.
1484 class VPWidenIntOrFpInductionRecipe : public VPHeaderPHIRecipe {
1485   PHINode *IV;
1486   TruncInst *Trunc;
1487   const InductionDescriptor &IndDesc;
1488 
1489 public:
1490   VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
1491                                 const InductionDescriptor &IndDesc)
1492       : VPHeaderPHIRecipe(VPDef::VPWidenIntOrFpInductionSC, IV, Start), IV(IV),
1493         Trunc(nullptr), IndDesc(IndDesc) {
1494     addOperand(Step);
1495   }
1496 
1497   VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
1498                                 const InductionDescriptor &IndDesc,
1499                                 TruncInst *Trunc)
1500       : VPHeaderPHIRecipe(VPDef::VPWidenIntOrFpInductionSC, Trunc, Start),
1501         IV(IV), Trunc(Trunc), IndDesc(IndDesc) {
1502     addOperand(Step);
1503   }
1504 
1505   ~VPWidenIntOrFpInductionRecipe() override = default;
1506 
1507   VP_CLASSOF_IMPL(VPDef::VPWidenIntOrFpInductionSC)
1508 
1509   /// Generate the vectorized and scalarized versions of the phi node as
1510   /// needed by their users.
1511   void execute(VPTransformState &State) override;
1512 
1513 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1514   /// Print the recipe.
1515   void print(raw_ostream &O, const Twine &Indent,
1516              VPSlotTracker &SlotTracker) const override;
1517 #endif
1518 
1519   VPValue *getBackedgeValue() override {
1520     // TODO: All operands of base recipe must exist and be at same index in
1521     // derived recipe.
1522     llvm_unreachable(
1523         "VPWidenIntOrFpInductionRecipe generates its own backedge value");
1524   }
1525 
1526   VPRecipeBase &getBackedgeRecipe() override {
1527     // TODO: All operands of base recipe must exist and be at same index in
1528     // derived recipe.
1529     llvm_unreachable(
1530         "VPWidenIntOrFpInductionRecipe generates its own backedge value");
1531   }
1532 
1533   /// Returns the step value of the induction.
1534   VPValue *getStepValue() { return getOperand(1); }
1535   const VPValue *getStepValue() const { return getOperand(1); }
1536 
1537   /// Returns the first defined value as a TruncInst, if it is one, or nullptr
1538   /// otherwise.
1539   TruncInst *getTruncInst() { return Trunc; }
1540   const TruncInst *getTruncInst() const { return Trunc; }
1541 
1542   PHINode *getPHINode() { return IV; }
1543 
1544   /// Returns the induction descriptor for the recipe.
1545   const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }
1546 
1547   /// Returns true if the induction is canonical, i.e. starting at 0 and
1548   /// incremented by UF * VF (= the original IV is incremented by 1).
1549   bool isCanonical() const;
1550 
1551   /// Returns the scalar type of the induction.
1552   Type *getScalarType() const {
1553     return Trunc ? Trunc->getType() : IV->getType();
1554   }
1555 };
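
// Illustrative example: for an IR induction
//   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
// with a step of 1, the recipe's operand 0 is the live-in start (0) and
// operand 1 the live-in step (1), so getStepValue() returns the step and
// isCanonical() is expected to hold (a sketch, not normative).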
1556 
1557 class VPWidenPointerInductionRecipe : public VPHeaderPHIRecipe {
1558   const InductionDescriptor &IndDesc;
1559 
1560   bool IsScalarAfterVectorization;
1561 
1562 public:
1563   /// Create a new VPWidenPointerInductionRecipe for \p Phi with start value \p
1564   /// Start and step value \p Step.
1565   VPWidenPointerInductionRecipe(PHINode *Phi, VPValue *Start, VPValue *Step,
1566                                 const InductionDescriptor &IndDesc,
1567                                 bool IsScalarAfterVectorization)
1568       : VPHeaderPHIRecipe(VPDef::VPWidenPointerInductionSC, Phi),
1569         IndDesc(IndDesc),
1570         IsScalarAfterVectorization(IsScalarAfterVectorization) {
1571     addOperand(Start);
1572     addOperand(Step);
1573   }
1574 
1575   ~VPWidenPointerInductionRecipe() override = default;
1576 
1577   VP_CLASSOF_IMPL(VPDef::VPWidenPointerInductionSC)
1578 
1579   /// Generate vector values for the pointer induction.
1580   void execute(VPTransformState &State) override;
1581 
1582   /// Returns true if only scalar values will be generated.
1583   bool onlyScalarsGenerated(ElementCount VF);
1584 
1585   /// Returns the induction descriptor for the recipe.
1586   const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }
1587 
1588 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1589   /// Print the recipe.
1590   void print(raw_ostream &O, const Twine &Indent,
1591              VPSlotTracker &SlotTracker) const override;
1592 #endif
1593 };
1594 
1595 /// A recipe for handling header phis that are widened in the vector loop.
1596 /// In the VPlan native path, all incoming VPValues & VPBasicBlock pairs are
1597 /// managed in the recipe directly.
1598 class VPWidenPHIRecipe : public VPHeaderPHIRecipe {
1599   /// List of incoming blocks. Only used in the VPlan native path.
1600   SmallVector<VPBasicBlock *, 2> IncomingBlocks;
1601 
1602 public:
1603   /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
1604   VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr)
1605       : VPHeaderPHIRecipe(VPDef::VPWidenPHISC, Phi) {
1606     if (Start)
1607       addOperand(Start);
1608   }
1609 
1610   ~VPWidenPHIRecipe() override = default;
1611 
1612   VP_CLASSOF_IMPL(VPDef::VPWidenPHISC)
1613 
1614   /// Generate the phi/select nodes.
1615   void execute(VPTransformState &State) override;
1616 
1617 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1618   /// Print the recipe.
1619   void print(raw_ostream &O, const Twine &Indent,
1620              VPSlotTracker &SlotTracker) const override;
1621 #endif
1622 
1623   /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
1624   void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
1625     addOperand(IncomingV);
1626     IncomingBlocks.push_back(IncomingBlock);
1627   }
1628 
1629   /// Returns the \p I th incoming VPBasicBlock.
1630   VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
1631 
1632   /// Returns the \p I th incoming VPValue.
1633   VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
1634 };
1635 
1636 /// A recipe for handling first-order recurrence phis. The start value is the
1637 /// first operand of the recipe and the incoming value from the backedge is the
1638 /// second operand.
1639 struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe {
1640   VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
1641       : VPHeaderPHIRecipe(VPDef::VPFirstOrderRecurrencePHISC, Phi, &Start) {}
1642 
1643   VP_CLASSOF_IMPL(VPDef::VPFirstOrderRecurrencePHISC)
1644 
1645   static inline bool classof(const VPHeaderPHIRecipe *R) {
1646     return R->getVPDefID() == VPDef::VPFirstOrderRecurrencePHISC;
1647   }
1648 
1649   void execute(VPTransformState &State) override;
1650 
1651 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1652   /// Print the recipe.
1653   void print(raw_ostream &O, const Twine &Indent,
1654              VPSlotTracker &SlotTracker) const override;
1655 #endif
1656 };
1657 
1658 /// A recipe for handling reduction phis. The start value is the first operand
1659 /// of the recipe and the incoming value from the backedge is the second
1660 /// operand.
1661 class VPReductionPHIRecipe : public VPHeaderPHIRecipe {
1662   /// Descriptor for the reduction.
1663   const RecurrenceDescriptor &RdxDesc;
1664 
1665   /// The phi is part of an in-loop reduction.
1666   bool IsInLoop;
1667 
1668   /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
1669   bool IsOrdered;
1670 
1671 public:
1672   /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
1673   /// RdxDesc.
1674   VPReductionPHIRecipe(PHINode *Phi, const RecurrenceDescriptor &RdxDesc,
1675                        VPValue &Start, bool IsInLoop = false,
1676                        bool IsOrdered = false)
1677       : VPHeaderPHIRecipe(VPDef::VPReductionPHISC, Phi, &Start),
1678         RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
1679     assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
1680   }
1681 
1682   ~VPReductionPHIRecipe() override = default;
1683 
1684   VP_CLASSOF_IMPL(VPDef::VPReductionPHISC)
1685 
1686   static inline bool classof(const VPHeaderPHIRecipe *R) {
1687     return R->getVPDefID() == VPDef::VPReductionPHISC;
1688   }
1689 
1690   /// Generate the phi/select nodes.
1691   void execute(VPTransformState &State) override;
1692 
1693 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1694   /// Print the recipe.
1695   void print(raw_ostream &O, const Twine &Indent,
1696              VPSlotTracker &SlotTracker) const override;
1697 #endif
1698 
1699   const RecurrenceDescriptor &getRecurrenceDescriptor() const {
1700     return RdxDesc;
1701   }
1702 
1703   /// Returns true if the phi is part of an ordered reduction.
1704   bool isOrdered() const { return IsOrdered; }
1705 
1706   /// Returns true if the phi is part of an in-loop reduction.
1707   bool isInLoop() const { return IsInLoop; }
1708 };
1709 
1710 /// A recipe for vectorizing a phi-node as a sequence of mask-based select
1711 /// instructions.
1712 class VPBlendRecipe : public VPRecipeBase, public VPValue {
1713 public:
1714   /// The blend operation is a User of the incoming values and of their
1715   /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
1716   /// might be incoming with a full mask for which there is no VPValue.
1717   VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
1718       : VPRecipeBase(VPDef::VPBlendSC, Operands, Phi->getDebugLoc()),
1719         VPValue(this, Phi) {
1720     assert(Operands.size() > 0 &&
1721            ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
1722            "Expected either a single incoming value or a positive even number "
1723            "of operands");
1724   }
1725 
1726   VP_CLASSOF_IMPL(VPDef::VPBlendSC)
1727 
1728   /// Return the number of incoming values, taking into account that a single
1729   /// incoming value has no mask.
1730   unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }
1731 
1732   /// Return incoming value number \p Idx.
1733   VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }
1734 
1735   /// Return mask number \p Idx.
1736   VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }
1737 
1738   /// Generate the phi/select nodes.
1739   void execute(VPTransformState &State) override;
1740 
1741 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1742   /// Print the recipe.
1743   void print(raw_ostream &O, const Twine &Indent,
1744              VPSlotTracker &SlotTracker) const override;
1745 #endif
1746 
1747   /// Returns true if the recipe only uses the first lane of operand \p Op.
1748   bool onlyFirstLaneUsed(const VPValue *Op) const override {
1749     assert(is_contained(operands(), Op) &&
1750            "Op must be an operand of the recipe");
1751     // Recursing through Blend recipes only; the recursion must terminate at
1752     // header phis at the latest.
1753     return all_of(users(),
1754                   [this](VPUser *U) { return U->onlyFirstLaneUsed(this); });
1755   }
1756 };
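
// Worked operand-layout example (illustrative): blending two incoming values
// I0 and I1 under masks M0 and M1 yields operands [I0, M0, I1, M1], so:
//
//   VPValue *Ops[] = {I0, M0, I1, M1};    // hypothetical values
//   VPBlendRecipe Blend(Phi, Ops);
//   // Blend.getNumIncomingValues() == (4 + 1) / 2 == 2
//   // Blend.getIncomingValue(1) == Ops[2], Blend.getMask(1) == Ops[3]
//
// With a single incoming value there is exactly one operand and no mask, so
// getMask() must not be queried in that case.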
1757 
1758 /// VPInterleaveRecipe is a recipe for transforming an interleave group of loads
1759 /// or stores into one wide load/store and shuffles. The first operand of a
1760 /// VPInterleave recipe is the address, followed by the stored values, followed
1761 /// by an optional mask.
1762 class VPInterleaveRecipe : public VPRecipeBase {
1763   const InterleaveGroup<Instruction> *IG;
1764 
1765   /// Indicates if the interleave group is in a conditional block and requires a
1766   /// mask.
1767   bool HasMask = false;
1768 
1769   /// Indicates if gaps between members of the group need to be masked out or if
1770   /// unused gaps can be loaded speculatively.
1771   bool NeedsMaskForGaps = false;
1772 
1773 public:
1774   VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
1775                      ArrayRef<VPValue *> StoredValues, VPValue *Mask,
1776                      bool NeedsMaskForGaps)
1777       : VPRecipeBase(VPDef::VPInterleaveSC, {Addr}), IG(IG),
1778         NeedsMaskForGaps(NeedsMaskForGaps) {
1779     for (unsigned i = 0; i < IG->getFactor(); ++i)
1780       if (Instruction *I = IG->getMember(i)) {
1781         if (I->getType()->isVoidTy())
1782           continue;
1783         new VPValue(I, this);
1784       }
1785 
1786     for (auto *SV : StoredValues)
1787       addOperand(SV);
1788     if (Mask) {
1789       HasMask = true;
1790       addOperand(Mask);
1791     }
1792   }
1793   ~VPInterleaveRecipe() override = default;
1794 
1795   VP_CLASSOF_IMPL(VPDef::VPInterleaveSC)
1796 
1797   /// Return the address accessed by this recipe.
1798   VPValue *getAddr() const {
1799     return getOperand(0); // Address is the 1st, mandatory operand.
1800   }
1801 
1802   /// Return the mask used by this recipe. Note that a full mask is represented
1803   /// by a nullptr.
1804   VPValue *getMask() const {
1805     // Mask is optional and therefore, if present, the last operand.
1806     return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
1807   }
1808 
1809   /// Return the VPValues stored by this interleave group. If it is a load
1810   /// interleave group, return an empty ArrayRef.
1811   ArrayRef<VPValue *> getStoredValues() const {
1812     // The first operand is the address, followed by the stored values, followed
1813     // by an optional mask.
1814     return ArrayRef<VPValue *>(op_begin(), getNumOperands())
1815         .slice(1, getNumStoreOperands());
1816   }
1817 
1818   /// Generate the wide load or store, and shuffles.
1819   void execute(VPTransformState &State) override;
1820 
1821 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1822   /// Print the recipe.
1823   void print(raw_ostream &O, const Twine &Indent,
1824              VPSlotTracker &SlotTracker) const override;
1825 #endif
1826 
1827   const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
1828 
1829   /// Returns the number of stored operands of this interleave group. Returns 0
1830   /// for load interleave groups.
1831   unsigned getNumStoreOperands() const {
1832     return getNumOperands() - (HasMask ? 2 : 1);
1833   }
1834 
1835   /// The recipe only uses the first lane of the address.
1836   bool onlyFirstLaneUsed(const VPValue *Op) const override {
1837     assert(is_contained(operands(), Op) &&
1838            "Op must be an operand of the recipe");
1839     return Op == getAddr() && !llvm::is_contained(getStoredValues(), Op);
1840   }
1841 };
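
// Worked operand-layout example (illustrative): a masked store group with two
// members has operands {Addr, SV0, SV1, Mask}, i.e. 4 operands in total, so
//   getNumStoreOperands() == 4 - 2 == 2,
//   getStoredValues()     == {SV0, SV1}   (slice(1, 2)), and
//   getMask()             == operand 3.
// A load group stores nothing: {Addr[, Mask]} and getStoredValues() is empty.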
1842 
1843 /// A recipe to represent inloop reduction operations, performing a reduction on
1844 /// a vector operand into a scalar value, and adding the result to a chain.
1845 /// The Operands are {ChainOp, VecOp, [Condition]}.
1846 class VPReductionRecipe : public VPRecipeBase, public VPValue {
1847   /// The recurrence descriptor for the reduction in question.
1848   const RecurrenceDescriptor &RdxDesc;
1849 
1850 public:
1851   VPReductionRecipe(const RecurrenceDescriptor &R, Instruction *I,
1852                     VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp)
1853       : VPRecipeBase(VPDef::VPReductionSC, {ChainOp, VecOp}), VPValue(this, I),
1854         RdxDesc(R) {
1855     if (CondOp)
1856       addOperand(CondOp);
1857   }
1858 
1859   ~VPReductionRecipe() override = default;
1860 
1861   VP_CLASSOF_IMPL(VPDef::VPReductionSC)
1862 
1863   /// Generate the reduction in the loop
1864   void execute(VPTransformState &State) override;
1865 
1866 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1867   /// Print the recipe.
1868   void print(raw_ostream &O, const Twine &Indent,
1869              VPSlotTracker &SlotTracker) const override;
1870 #endif
1871 
1872   /// The VPValue of the scalar Chain being accumulated.
1873   VPValue *getChainOp() const { return getOperand(0); }
1874   /// The VPValue of the vector value to be reduced.
1875   VPValue *getVecOp() const { return getOperand(1); }
1876   /// The VPValue of the condition for the block.
1877   VPValue *getCondOp() const {
1878     return getNumOperands() > 2 ? getOperand(2) : nullptr;
1879   }
1880 };
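
// Illustrative sketch: an unconditional in-loop add reduction has just the
// two mandatory operands, assuming hypothetical values Chain and Vec:
//
//   VPReductionRecipe Rdx(RdxDesc, AddInst, Chain, Vec, /*CondOp=*/nullptr);
//   // Rdx.getChainOp() == Chain, Rdx.getVecOp() == Vec,
//   // Rdx.getCondOp() == nullptr (only 2 operands, no condition).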
1881 
1882 /// VPReplicateRecipe replicates a given instruction producing multiple scalar
1883 /// copies of the original scalar type, one per lane, instead of producing a
1884 /// single copy of widened type for all lanes. If the instruction is known to be
1885 /// uniform, only one copy, for lane zero, will be generated.
1886 class VPReplicateRecipe : public VPRecipeWithIRFlags, public VPValue {
1887   /// Indicator if only a single replica per lane is needed.
1888   bool IsUniform;
1889 
1890   /// Indicator if the replicas are also predicated.
1891   bool IsPredicated;
1892 
1893 public:
1894   template <typename IterT>
1895   VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
1896                     bool IsUniform, VPValue *Mask = nullptr)
1897       : VPRecipeWithIRFlags(VPDef::VPReplicateSC, Operands, *I),
1898         VPValue(this, I), IsUniform(IsUniform), IsPredicated(Mask) {
1899     if (Mask)
1900       addOperand(Mask);
1901   }
1902 
1903   ~VPReplicateRecipe() override = default;
1904 
1905   VP_CLASSOF_IMPL(VPDef::VPReplicateSC)
1906 
1907   /// Generate replicas of the desired Ingredient. Replicas will be generated
1908   /// for all parts and lanes unless a specific part and lane are specified in
1909   /// the \p State.
1910   void execute(VPTransformState &State) override;
1911 
1912 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1913   /// Print the recipe.
1914   void print(raw_ostream &O, const Twine &Indent,
1915              VPSlotTracker &SlotTracker) const override;
1916 #endif
1917 
1918   bool isUniform() const { return IsUniform; }
1919 
1920   bool isPredicated() const { return IsPredicated; }
1921 
1922   /// Returns true if the recipe only uses the first lane of operand \p Op.
1923   bool onlyFirstLaneUsed(const VPValue *Op) const override {
1924     assert(is_contained(operands(), Op) &&
1925            "Op must be an operand of the recipe");
1926     return isUniform();
1927   }
1928 
1929   /// Returns true if the recipe uses scalars of operand \p Op.
1930   bool usesScalars(const VPValue *Op) const override {
1931     assert(is_contained(operands(), Op) &&
1932            "Op must be an operand of the recipe");
1933     return true;
1934   }
1935 
1936   /// Returns true if the recipe is used by a widened recipe via an intervening
1937   /// VPPredInstPHIRecipe. In this case, the scalar values should also be packed
1938   /// in a vector.
1939   bool shouldPack() const;
1940 
1941   /// Return the mask of a predicated VPReplicateRecipe.
1942   VPValue *getMask() {
1943     assert(isPredicated() && "Trying to get the mask of an unpredicated recipe");
1944     return getOperand(getNumOperands() - 1);
1945   }
1946 };
1947 
1948 /// A recipe for generating conditional branches on the bits of a mask.
1949 class VPBranchOnMaskRecipe : public VPRecipeBase {
1950 public:
1951   VPBranchOnMaskRecipe(VPValue *BlockInMask)
1952       : VPRecipeBase(VPDef::VPBranchOnMaskSC, {}) {
1953     if (BlockInMask) // nullptr means all-one mask.
1954       addOperand(BlockInMask);
1955   }
1956 
1957   VP_CLASSOF_IMPL(VPDef::VPBranchOnMaskSC)
1958 
1959   /// Generate the extraction of the appropriate bit from the block mask and the
1960   /// conditional branch.
1961   void execute(VPTransformState &State) override;
1962 
1963 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1964   /// Print the recipe.
1965   void print(raw_ostream &O, const Twine &Indent,
1966              VPSlotTracker &SlotTracker) const override {
1967     O << Indent << "BRANCH-ON-MASK ";
1968     if (VPValue *Mask = getMask())
1969       Mask->printAsOperand(O, SlotTracker);
1970     else
1971       O << " All-One";
1972   }
1973 #endif
1974 
1975   /// Return the mask used by this recipe. Note that a full mask is represented
1976   /// by a nullptr.
1977   VPValue *getMask() const {
1978     assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
1979     // Mask is optional.
1980     return getNumOperands() == 1 ? getOperand(0) : nullptr;
1981   }
1982 
1983   /// Returns true if the recipe uses scalars of operand \p Op.
1984   bool usesScalars(const VPValue *Op) const override {
1985     assert(is_contained(operands(), Op) &&
1986            "Op must be an operand of the recipe");
1987     return true;
1988   }
1989 };
1990 
1991 /// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
1992 /// control converges back from a Branch-on-Mask. The phi nodes are needed in
1993 /// order to merge values that are set under such a branch and feed their uses.
1994 /// The phi nodes can be scalar or vector depending on the users of the value.
1995 /// This recipe works in concert with VPBranchOnMaskRecipe.
1996 class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
1997 public:
1998   /// Construct a VPPredInstPHIRecipe given \p PredV, whose value needs a phi
1999   /// node after merging back from a Branch-on-Mask.
2000   VPPredInstPHIRecipe(VPValue *PredV)
2001       : VPRecipeBase(VPDef::VPPredInstPHISC, PredV), VPValue(this) {}
2002   ~VPPredInstPHIRecipe() override = default;
2003 
2004   VP_CLASSOF_IMPL(VPDef::VPPredInstPHISC)
2005 
2006   /// Generates phi nodes for live-outs as needed to retain SSA form.
2007   void execute(VPTransformState &State) override;
2008 
2009 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2010   /// Print the recipe.
2011   void print(raw_ostream &O, const Twine &Indent,
2012              VPSlotTracker &SlotTracker) const override;
2013 #endif
2014 
2015   /// Returns true if the recipe uses scalars of operand \p Op.
2016   bool usesScalars(const VPValue *Op) const override {
2017     assert(is_contained(operands(), Op) &&
2018            "Op must be an operand of the recipe");
2019     return true;
2020   }
2021 };
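
// Illustrative interplay with VPBranchOnMaskRecipe (sketched in the plan's
// printed form, names hypothetical):
//
//   BRANCH-ON-MASK vp<%mask>
//   pred.then:     REPLICATE ir<%d> = sdiv ir<%a>, ir<%b>
//   pred.continue: PHI-PREDICATED-INSTRUCTION vp<%merged> = ir<%d>
//
// where the VPPredInstPHIRecipe merges the conditionally computed value back
// into straight-line SSA form.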
2022 
2023 /// A Recipe for widening load/store operations.
2024 /// The recipe uses the following VPValues:
2025 /// - For load: Address, optional mask
2026 /// - For store: Address, stored value, optional mask
2027 /// TODO: We currently execute only per-part unless a specific instance is
2028 /// provided.
2029 class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
2030   Instruction &Ingredient;
2031 
2032   // Whether the loaded-from / stored-to addresses are consecutive.
2033   bool Consecutive;
2034 
2035   // Whether the consecutive loaded/stored addresses are in reverse order.
2036   bool Reverse;
2037 
2038   void setMask(VPValue *Mask) {
2039     if (!Mask)
2040       return;
2041     addOperand(Mask);
2042   }
2043 
2044   bool isMasked() const {
2045     return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
2046   }
2047 
2048 public:
2049   VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
2050                                  bool Consecutive, bool Reverse)
2051       : VPRecipeBase(VPDef::VPWidenMemoryInstructionSC, {Addr}),
2052         Ingredient(Load), Consecutive(Consecutive), Reverse(Reverse) {
2053     assert((Consecutive || !Reverse) && "Reverse implies consecutive");
2054     new VPValue(this, &Load);
2055     setMask(Mask);
2056   }
2057 
2058   VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
2059                                  VPValue *StoredValue, VPValue *Mask,
2060                                  bool Consecutive, bool Reverse)
2061       : VPRecipeBase(VPDef::VPWidenMemoryInstructionSC, {Addr, StoredValue}),
2062         Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
2063     assert((Consecutive || !Reverse) && "Reverse implies consecutive");
2064     setMask(Mask);
2065   }
2066 
2067   VP_CLASSOF_IMPL(VPDef::VPWidenMemoryInstructionSC)
2068 
2069   /// Return the address accessed by this recipe.
2070   VPValue *getAddr() const {
2071     return getOperand(0); // Address is the 1st, mandatory operand.
2072   }
2073 
2074   /// Return the mask used by this recipe. Note that a full mask is represented
2075   /// by a nullptr.
2076   VPValue *getMask() const {
2077     // Mask is optional and therefore the last operand.
2078     return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
2079   }
2080 
2081   /// Returns true if this recipe is a store.
2082   bool isStore() const { return isa<StoreInst>(Ingredient); }
2083 
2084   /// Return the value stored by this recipe.
2085   VPValue *getStoredValue() const {
2086     assert(isStore() && "Stored value only available for store instructions");
2087     return getOperand(1); // Stored value is the 2nd, mandatory operand.
2088   }
2089 
2090   // Return whether the loaded-from / stored-to addresses are consecutive.
2091   bool isConsecutive() const { return Consecutive; }
2092 
2093   // Return whether the consecutive loaded/stored addresses are in reverse
2094   // order.
2095   bool isReverse() const { return Reverse; }
2096 
2097   /// Generate the wide load/store.
2098   void execute(VPTransformState &State) override;
2099 
2100 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2101   /// Print the recipe.
2102   void print(raw_ostream &O, const Twine &Indent,
2103              VPSlotTracker &SlotTracker) const override;
2104 #endif
2105 
2106   /// Returns true if the recipe only uses the first lane of operand \p Op.
2107   bool onlyFirstLaneUsed(const VPValue *Op) const override {
2108     assert(is_contained(operands(), Op) &&
2109            "Op must be an operand of the recipe");
2110 
2111     // Widened, consecutive memory operations only demand the first lane of
2112     // their address, unless the same operand is also stored. The latter can
2113     // happen with opaque pointers.
2114     return Op == getAddr() && isConsecutive() &&
2115            (!isStore() || Op != getStoredValue());
2116   }
2117 
2118   Instruction &getIngredient() const { return Ingredient; }
2119 };
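
// Worked operand-layout example (illustrative): a masked widened store has
// operands {Addr, StoredValue, Mask}, so isMasked() sees 3 operands and
// getMask() returns operand 2; an unmasked widened load has just {Addr},
// so getMask() returns nullptr and getAddr() operand 0.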
2120 
2121 /// Recipe to expand a SCEV expression.
2122 class VPExpandSCEVRecipe : public VPRecipeBase, public VPValue {
2123   const SCEV *Expr;
2124   ScalarEvolution &SE;
2125 
2126 public:
2127   VPExpandSCEVRecipe(const SCEV *Expr, ScalarEvolution &SE)
2128       : VPRecipeBase(VPDef::VPExpandSCEVSC, {}), VPValue(this), Expr(Expr),
2129         SE(SE) {}
2130 
2131   ~VPExpandSCEVRecipe() override = default;
2132 
2133   VP_CLASSOF_IMPL(VPDef::VPExpandSCEVSC)
2134 
2135   /// Generate code to expand the SCEV expression.
2136   void execute(VPTransformState &State) override;
2137 
2138 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2139   /// Print the recipe.
2140   void print(raw_ostream &O, const Twine &Indent,
2141              VPSlotTracker &SlotTracker) const override;
2142 #endif
2143 
2144   const SCEV *getSCEV() const { return Expr; }
2145 };
2146 
2147 /// Canonical scalar induction phi of the vector loop, starting at the specified
2148 /// start value (either 0 or the resume value when vectorizing the epilogue
2149 /// loop). VPWidenCanonicalIVRecipe represents the vector version of the
2150 /// canonical induction variable.
2151 class VPCanonicalIVPHIRecipe : public VPHeaderPHIRecipe {
2152 public:
2153   VPCanonicalIVPHIRecipe(VPValue *StartV, DebugLoc DL)
2154       : VPHeaderPHIRecipe(VPDef::VPCanonicalIVPHISC, nullptr, StartV, DL) {}
2155 
2156   ~VPCanonicalIVPHIRecipe() override = default;
2157 
2158   VP_CLASSOF_IMPL(VPDef::VPCanonicalIVPHISC)
2159 
2160   static inline bool classof(const VPHeaderPHIRecipe *D) {
2161     return D->getVPDefID() == VPDef::VPCanonicalIVPHISC;
2162   }
2163 
2164   /// Generate the canonical scalar induction phi of the vector loop.
2165   void execute(VPTransformState &State) override;
2166 
2167 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2168   /// Print the recipe.
2169   void print(raw_ostream &O, const Twine &Indent,
2170              VPSlotTracker &SlotTracker) const override;
2171 #endif
2172 
2173   /// Returns the scalar type of the induction.
2174   Type *getScalarType() const {
2175     return getStartValue()->getLiveInIRValue()->getType();
2176   }
2177 
2178   /// Returns true if the recipe only uses the first lane of operand \p Op.
2179   bool onlyFirstLaneUsed(const VPValue *Op) const override {
2180     assert(is_contained(operands(), Op) &&
2181            "Op must be an operand of the recipe");
2182     return true;
2183   }
2184 
2185   /// Returns true if the recipe only uses the first part of operand \p Op.
2186   bool onlyFirstPartUsed(const VPValue *Op) const override {
2187     assert(is_contained(operands(), Op) &&
2188            "Op must be an operand of the recipe");
2189     return true;
2190   }
2191 
2192   /// Check if the induction described by \p Kind, \p Start and \p Step is
2193   /// canonical, i.e. has the same start, step (of 1), and type as the
2194   /// canonical IV.
2195   bool isCanonical(InductionDescriptor::InductionKind Kind, VPValue *Start,
2196                    VPValue *Step, Type *Ty) const;
2197 };
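
// Illustrative output pattern (assuming VF * UF == 8): the canonical IV
// materializes as
//   %index      = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
//   %index.next = add i64 %index, 8
// and the vector loop exits once %index.next reaches the vector trip count.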
2198 
2199 /// A recipe for generating the active lane mask for the vector loop that is
2200 /// used to predicate the vector operations.
2201 /// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
2202 /// remove VPActiveLaneMaskPHIRecipe.
2203 class VPActiveLaneMaskPHIRecipe : public VPHeaderPHIRecipe {
2204 public:
2205   VPActiveLaneMaskPHIRecipe(VPValue *StartMask, DebugLoc DL)
2206       : VPHeaderPHIRecipe(VPDef::VPActiveLaneMaskPHISC, nullptr, StartMask,
2207                           DL) {}
2208 
2209   ~VPActiveLaneMaskPHIRecipe() override = default;
2210 
2211   VP_CLASSOF_IMPL(VPDef::VPActiveLaneMaskPHISC)
2212 
2213   static inline bool classof(const VPHeaderPHIRecipe *D) {
2214     return D->getVPDefID() == VPDef::VPActiveLaneMaskPHISC;
2215   }
2216 
2217   /// Generate the active lane mask phi of the vector loop.
2218   void execute(VPTransformState &State) override;
2219 
2220 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2221   /// Print the recipe.
2222   void print(raw_ostream &O, const Twine &Indent,
2223              VPSlotTracker &SlotTracker) const override;
2224 #endif
2225 };
2226 
2227 /// A Recipe for widening the canonical induction variable of the vector loop.
2228 class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
2229 public:
2230   VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe *CanonicalIV)
2231       : VPRecipeBase(VPDef::VPWidenCanonicalIVSC, {CanonicalIV}),
2232         VPValue(this) {}
2233 
2234   ~VPWidenCanonicalIVRecipe() override = default;
2235 
2236   VP_CLASSOF_IMPL(VPDef::VPWidenCanonicalIVSC)
2237 
2238   /// Generate a canonical vector induction variable of the vector loop, with
2239   /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
2240   /// step = <VF*UF, VF*UF, ..., VF*UF>.
2241   void execute(VPTransformState &State) override;
2242 
2243 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2244   /// Print the recipe.
2245   void print(raw_ostream &O, const Twine &Indent,
2246              VPSlotTracker &SlotTracker) const override;
2247 #endif
2248 
2249   /// Returns the scalar type of the induction.
2250   const Type *getScalarType() const {
2251     return cast<VPCanonicalIVPHIRecipe>(getOperand(0)->getDefiningRecipe())
2252         ->getScalarType();
2253   }
2254 };
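
// Worked example of the start/step documented above (illustrative): with
// VF = 4 and UF = 2, part 0 starts at <0, 1, 2, 3>, part 1 at <4, 5, 6, 7>,
// and both parts step by <8, 8, 8, 8> each vector iteration.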
2255 
2256 /// A recipe for converting the canonical IV value to the corresponding value of
2257 /// an IV with different start and step values, using Start + CanonicalIV *
2258 /// Step.
2259 class VPDerivedIVRecipe : public VPRecipeBase, public VPValue {
2260   /// If not nullptr, the result of the induction will get truncated to
2261   /// TruncResultTy.
2262   Type *TruncResultTy;
2263 
2264   /// Kind of the induction.
2265   const InductionDescriptor::InductionKind Kind;
2266   /// If not nullptr, the floating point induction binary operator. Must be set
2267   /// for floating point inductions.
2268   const FPMathOperator *FPBinOp;
2269 
2270 public:
2271   VPDerivedIVRecipe(const InductionDescriptor &IndDesc, VPValue *Start,
2272                     VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step,
2273                     Type *TruncResultTy)
2274       : VPRecipeBase(VPDef::VPDerivedIVSC, {Start, CanonicalIV, Step}),
2275         VPValue(this), TruncResultTy(TruncResultTy), Kind(IndDesc.getKind()),
2276         FPBinOp(dyn_cast_or_null<FPMathOperator>(IndDesc.getInductionBinOp())) {
2277   }
2278 
2279   ~VPDerivedIVRecipe() override = default;
2280 
2281   VP_CLASSOF_IMPL(VPDef::VPDerivedIVSC)
2282 
2283   /// Generate the transformed value of the induction at offset StartValue (1st
2284   /// operand) + IV (2nd operand) * StepValue (3rd operand).
2285   void execute(VPTransformState &State) override;
2286 
2287 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2288   /// Print the recipe.
2289   void print(raw_ostream &O, const Twine &Indent,
2290              VPSlotTracker &SlotTracker) const override;
2291 #endif
2292 
2293   Type *getScalarType() const {
2294     return TruncResultTy ? TruncResultTy
2295                          : getStartValue()->getLiveInIRValue()->getType();
2296   }
2297 
2298   VPValue *getStartValue() const { return getOperand(0); }
2299   VPValue *getCanonicalIV() const { return getOperand(1); }
2300   VPValue *getStepValue() const { return getOperand(2); }
2301 
2302   /// Returns true if the recipe only uses the first lane of operand \p Op.
2303   bool onlyFirstLaneUsed(const VPValue *Op) const override {
2304     assert(is_contained(operands(), Op) &&
2305            "Op must be an operand of the recipe");
2306     return true;
2307   }
2308 };
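
// Worked example (illustrative): for an IV with start 10 and step 3, when the
// canonical IV holds 4 the derived value is 10 + 4 * 3 == 22, truncated to
// TruncResultTy if one is set.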
2309 
2310 /// A recipe for handling phi nodes of integer and floating-point inductions,
2311 /// producing their scalar values.
2312 class VPScalarIVStepsRecipe : public VPRecipeWithIRFlags, public VPValue {
2313   Instruction::BinaryOps InductionOpcode;
2314 
2315 public:
2316   VPScalarIVStepsRecipe(VPValue *IV, VPValue *Step,
2317                         Instruction::BinaryOps Opcode, FastMathFlags FMFs)
2318       : VPRecipeWithIRFlags(VPDef::VPScalarIVStepsSC,
2319                             ArrayRef<VPValue *>({IV, Step}), FMFs),
2320         VPValue(this), InductionOpcode(Opcode) {}
2321 
2322   VPScalarIVStepsRecipe(const InductionDescriptor &IndDesc, VPValue *IV,
2323                         VPValue *Step)
2324       : VPScalarIVStepsRecipe(
2325             IV, Step, IndDesc.getInductionOpcode(),
2326             dyn_cast_or_null<FPMathOperator>(IndDesc.getInductionBinOp())
2327                 ? IndDesc.getInductionBinOp()->getFastMathFlags()
2328                 : FastMathFlags()) {}
2329 
2330   ~VPScalarIVStepsRecipe() override = default;
2331 
2332   VP_CLASSOF_IMPL(VPDef::VPScalarIVStepsSC)
2333 
2334   /// Generate the scalarized versions of the phi node as needed by its users.
2335   void execute(VPTransformState &State) override;
2336 
2337 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2338   /// Print the recipe.
2339   void print(raw_ostream &O, const Twine &Indent,
2340              VPSlotTracker &SlotTracker) const override;
2341 #endif
2342 
2343   VPValue *getStepValue() const { return getOperand(1); }
2344 
2345   /// Returns true if the recipe only uses the first lane of operand \p Op.
2346   bool onlyFirstLaneUsed(const VPValue *Op) const override {
2347     assert(is_contained(operands(), Op) &&
2348            "Op must be an operand of the recipe");
2349     return true;
2350   }
2351 };
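
// Illustrative per-lane example: with VF = 4 and a step of 2, the lanes of
// part 0 receive the scalar values {0, 2, 4, 6} added on top of the incoming
// IV value (a sketch of the intended semantics, not normative).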
2352 
2353 /// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
2354 /// holds a sequence of zero or more VPRecipes, each representing a sequence of
2355 /// output IR instructions. All PHI-like recipes must come before any non-PHI
2356 /// recipes.
2356 class VPBasicBlock : public VPBlockBase {
2357 public:
2358   using RecipeListTy = iplist<VPRecipeBase>;
2359 
2360 private:
2361   /// The VPRecipes held in the order of output instructions to generate.
2362   RecipeListTy Recipes;
2363 
2364 public:
2365   VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
2366       : VPBlockBase(VPBasicBlockSC, Name.str()) {
2367     if (Recipe)
2368       appendRecipe(Recipe);
2369   }
2370 
2371   ~VPBasicBlock() override {
2372     while (!Recipes.empty())
2373       Recipes.pop_back();
2374   }
2375 
2376   /// Instruction iterators...
2377   using iterator = RecipeListTy::iterator;
2378   using const_iterator = RecipeListTy::const_iterator;
2379   using reverse_iterator = RecipeListTy::reverse_iterator;
2380   using const_reverse_iterator = RecipeListTy::const_reverse_iterator;
2381 
2382   //===--------------------------------------------------------------------===//
2383   /// Recipe iterator methods
2384   ///
2385   inline iterator begin() { return Recipes.begin(); }
2386   inline const_iterator begin() const { return Recipes.begin(); }
2387   inline iterator end() { return Recipes.end(); }
2388   inline const_iterator end() const { return Recipes.end(); }
2389 
2390   inline reverse_iterator rbegin() { return Recipes.rbegin(); }
2391   inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
2392   inline reverse_iterator rend() { return Recipes.rend(); }
2393   inline const_reverse_iterator rend() const { return Recipes.rend(); }
2394 
2395   inline size_t size() const { return Recipes.size(); }
2396   inline bool empty() const { return Recipes.empty(); }
2397   inline const VPRecipeBase &front() const { return Recipes.front(); }
2398   inline VPRecipeBase &front() { return Recipes.front(); }
2399   inline const VPRecipeBase &back() const { return Recipes.back(); }
2400   inline VPRecipeBase &back() { return Recipes.back(); }
2401 
2402   /// Returns a reference to the list of recipes.
2403   RecipeListTy &getRecipeList() { return Recipes; }
2404 
2405   /// Returns a pointer to a member of the recipe list.
2406   static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
2407     return &VPBasicBlock::Recipes;
2408   }
2409 
2410   /// Method to support type inquiry through isa, cast, and dyn_cast.
2411   static inline bool classof(const VPBlockBase *V) {
2412     return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
2413   }
2414 
2415   void insert(VPRecipeBase *Recipe, iterator InsertPt) {
2416     assert(Recipe && "No recipe to append.");
2417     assert(!Recipe->Parent && "Recipe already in VPlan");
2418     Recipe->Parent = this;
2419     Recipes.insert(InsertPt, Recipe);
2420   }
2421 
2422   /// Augment the existing recipes of a VPBasicBlock with an additional
2423   /// \p Recipe as the last recipe.
2424   void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }
2425 
2426   /// The method which generates the output IR instructions that correspond to
2427   /// this VPBasicBlock, thereby "executing" the VPlan.
2428   void execute(VPTransformState *State) override;
2429 
2430   /// Return the position of the first non-phi node recipe in the block.
2431   iterator getFirstNonPhi();
2432 
2433   /// Returns an iterator range over the PHI-like recipes in the block.
2434   iterator_range<iterator> phis() {
2435     return make_range(begin(), getFirstNonPhi());
2436   }
2437 
2438   void dropAllReferences(VPValue *NewValue) override;
2439 
2440   /// Split current block at \p SplitAt by inserting a new block between the
2441   /// current block and its successors and moving all recipes starting at
2442   /// \p SplitAt to the new block. Returns the new block.
2443   VPBasicBlock *splitAt(iterator SplitAt);
2444 
2445   VPRegionBlock *getEnclosingLoopRegion();
2446 
2447 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2448   /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
2449   /// SlotTracker is used to print unnamed VPValues using consecutive numbers.
2450   ///
2451   /// Note that the numbering is applied to the whole VPlan, so printing
2452   /// individual blocks is consistent with the whole VPlan printing.
2453   void print(raw_ostream &O, const Twine &Indent,
2454              VPSlotTracker &SlotTracker) const override;
2455   using VPBlockBase::print; // Get the print(raw_stream &O) version.
2456 #endif
2457 
2458   /// If the block has multiple successors, return the branch recipe terminating
2459   /// the block. If there is no successor or only a single one, return nullptr.
2460   VPRecipeBase *getTerminator();
2461   const VPRecipeBase *getTerminator() const;
2462 
2463   /// Returns true if the block is exiting its parent region.
2464   bool isExiting() const;
2465 
2466 private:
2467   /// Create an IR BasicBlock to hold the output instructions generated by this
2468   /// VPBasicBlock, and return it. Update the CFGState accordingly.
2469   BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
2470 };
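
// Minimal usage sketch (hypothetical recipe pointer, not a complete example):
//
//   VPBasicBlock *VPBB = new VPBasicBlock("loop.body");
//   VPBB->appendRecipe(SomeRecipe);       // insert(SomeRecipe, end())
//   for (VPRecipeBase &R : VPBB->phis())  // only the leading phi-like recipes
//     (void)R;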
2471 
2472 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
2473 /// which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
2474 /// A VPRegionBlock may indicate that its contents are to be replicated several
2475 /// times. This is designed to support predicated scalarization, in which a
2476 /// scalar if-then code structure needs to be generated VF * UF times. Having
2477 /// this replication indicator helps to keep a single model for multiple
2478 /// candidate VF's. The actual replication takes place only once the desired VF
2479 /// and UF have been determined.
2480 class VPRegionBlock : public VPBlockBase {
2481   /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
2482   VPBlockBase *Entry;
2483 
2484   /// Hold the Single Exiting block of the SESE region modelled by the
2485   /// VPRegionBlock.
2486   VPBlockBase *Exiting;
2487 
2488   /// An indicator whether this region is to generate multiple replicated
2489   /// instances of output IR corresponding to its VPBlockBases.
2490   bool IsReplicator;
2491 
2492 public:
2493   VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exiting,
2494                 const std::string &Name = "", bool IsReplicator = false)
2495       : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exiting(Exiting),
2496         IsReplicator(IsReplicator) {
2497     assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
2498     assert(Exiting->getSuccessors().empty() && "Exit block has successors.");
2499     Entry->setParent(this);
2500     Exiting->setParent(this);
2501   }
2502   VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
2503       : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exiting(nullptr),
2504         IsReplicator(IsReplicator) {}
2505 
2506   ~VPRegionBlock() override {
2507     if (Entry) {
2508       VPValue DummyValue;
2509       Entry->dropAllReferences(&DummyValue);
2510       deleteCFG(Entry);
2511     }
2512   }
2513 
2514   /// Method to support type inquiry through isa, cast, and dyn_cast.
2515   static inline bool classof(const VPBlockBase *V) {
2516     return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
2517   }
2518 
2519   const VPBlockBase *getEntry() const { return Entry; }
2520   VPBlockBase *getEntry() { return Entry; }
2521 
2522   /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
2523   /// EntryBlock must have no predecessors.
2524   void setEntry(VPBlockBase *EntryBlock) {
2525     assert(EntryBlock->getPredecessors().empty() &&
2526            "Entry block cannot have predecessors.");
2527     Entry = EntryBlock;
2528     EntryBlock->setParent(this);
2529   }
2530 
2531   const VPBlockBase *getExiting() const { return Exiting; }
2532   VPBlockBase *getExiting() { return Exiting; }
2533 
2534   /// Set \p ExitingBlock as the exiting VPBlockBase of this VPRegionBlock. \p
2535   /// ExitingBlock must have no successors.
2536   void setExiting(VPBlockBase *ExitingBlock) {
2537     assert(ExitingBlock->getSuccessors().empty() &&
2538            "Exit block cannot have successors.");
2539     Exiting = ExitingBlock;
2540     ExitingBlock->setParent(this);
2541   }
2542 
2543   /// Returns the pre-header VPBasicBlock of the loop region.
2544   VPBasicBlock *getPreheaderVPBB() {
2545     assert(!isReplicator() && "should only get pre-header of loop regions");
2546     return getSinglePredecessor()->getExitingBasicBlock();
2547   }
2548 
2549   /// An indicator whether this region is to generate multiple replicated
2550   /// instances of output IR corresponding to its VPBlockBases.
2551   bool isReplicator() const { return IsReplicator; }
2552 
2553   /// The method which generates the output IR instructions that correspond to
2554   /// this VPRegionBlock, thereby "executing" the VPlan.
2555   void execute(VPTransformState *State) override;
2556 
2557   void dropAllReferences(VPValue *NewValue) override;
2558 
2559 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2560   /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
2561   /// \p Indent. \p SlotTracker is used to print unnamed VPValues using
2562   /// consecutive numbers.
2563   ///
2564   /// Note that the numbering is applied to the whole VPlan, so printing
2565   /// individual regions is consistent with the whole VPlan printing.
2566   void print(raw_ostream &O, const Twine &Indent,
2567              VPSlotTracker &SlotTracker) const override;
2568   using VPBlockBase::print; // Get the print(raw_stream &O) version.
2569 #endif
2570 };
2571 
2572 /// VPlan models a candidate for vectorization, encoding various decisions taken
2573 /// to produce efficient output IR, including which branches, basic-blocks and
2574 /// output IR instructions to generate, and their cost. VPlan holds a
2575 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2576 /// VPBasicBlock.
2577 class VPlan {
2578   friend class VPlanPrinter;
2579   friend class VPSlotTracker;
2580 
2581   /// Hold the single entry to the Hierarchical CFG of the VPlan, i.e. the
2582   /// preheader of the vector loop.
2583   VPBasicBlock *Entry;
2584 
2585   /// VPBasicBlock corresponding to the original preheader. Used to place
2586   /// VPExpandSCEV recipes for expressions used during skeleton creation and the
2587   /// rest of VPlan execution.
2588   VPBasicBlock *Preheader;
2589 
2590   /// Holds the VFs applicable to this VPlan.
2591   SmallSetVector<ElementCount, 2> VFs;
2592 
2593   /// Holds the UFs applicable to this VPlan. If empty, the VPlan is valid for
2594   /// any UF.
2595   SmallSetVector<unsigned, 2> UFs;
2596 
2597   /// Holds the name of the VPlan, for printing.
2598   std::string Name;
2599 
2600   /// Represents the trip count of the original loop, for folding
2601   /// the tail.
2602   VPValue *TripCount = nullptr;
2603 
2604   /// Represents the backedge taken count of the original loop, for folding
2605   /// the tail. It equals TripCount - 1.
2606   VPValue *BackedgeTakenCount = nullptr;
2607 
2608   /// Represents the vector trip count.
2609   VPValue VectorTripCount;
2610 
2611   /// Represents the loop-invariant VF * UF of the vector loop region.
2612   VPValue VFxUF;
2613 
2614   /// Holds a mapping between Values and their corresponding VPValue inside
2615   /// VPlan.
2616   Value2VPValueTy Value2VPValue;
2617 
2618   /// Contains all the external definitions created for this VPlan. External
2619   /// definitions are VPValues that hold a pointer to their underlying IR.
2620   SmallVector<VPValue *, 16> VPLiveInsToFree;
2621 
2622   /// Indicates whether it is safe to use the Value2VPValue mapping or if the
2623   /// mapping cannot be used any longer, because it is stale.
2624   bool Value2VPValueEnabled = true;
2625 
2626   /// Values used outside the plan.
2627   MapVector<PHINode *, VPLiveOut *> LiveOuts;
2628 
2629   /// Mapping from SCEVs to the VPValues representing their expansions.
2630   /// NOTE: This mapping is temporary and will be removed once all users have
2631   /// been modeled in VPlan directly.
2632   DenseMap<const SCEV *, VPValue *> SCEVToExpansion;
2633 
2634 public:
2635   /// Construct a VPlan with original preheader \p Preheader, trip count \p TC
2636   /// and \p Entry to the plan. At the moment, \p Preheader and \p Entry need to
2637   /// be disconnected, as the bypass blocks between them are not yet modeled in
2638   /// VPlan.
2639   VPlan(VPBasicBlock *Preheader, VPValue *TC, VPBasicBlock *Entry)
2640       : VPlan(Preheader, Entry) {
2641     TripCount = TC;
2642   }
2643 
2644   /// Construct a VPlan with original preheader \p Preheader and \p Entry to
2645   /// the plan. At the moment, \p Preheader and \p Entry need to be
2646   /// disconnected, as the bypass blocks between them are not yet modeled in
2647   /// VPlan.
2648   VPlan(VPBasicBlock *Preheader, VPBasicBlock *Entry)
2649       : Entry(Entry), Preheader(Preheader) {
2650     Entry->setPlan(this);
2651     Preheader->setPlan(this);
2652     assert(Preheader->getNumSuccessors() == 0 &&
2653            Preheader->getNumPredecessors() == 0 &&
2654            "preheader must be disconnected");
2655   }
2656 
2657   ~VPlan();
2658 
2659   /// Create initial VPlan skeleton, having an "entry" VPBasicBlock (wrapping
2660   /// original scalar pre-header) which contains SCEV expansions that need to
2661   /// happen before the CFG is modified; a VPBasicBlock for the vector
2662   /// pre-header, followed by a region for the vector loop, followed by the
2663   /// middle VPBasicBlock.
2664   static VPlanPtr createInitialVPlan(const SCEV *TripCount,
2665                                      ScalarEvolution &PSE);
2666 
2667   /// Prepare the plan for execution, setting up the required live-in values.
2668   void prepareToExecute(Value *TripCount, Value *VectorTripCount,
2669                         Value *CanonicalIVStartValue, VPTransformState &State);
2670 
2671   /// Generate the IR code for this VPlan.
2672   void execute(VPTransformState *State);
2673 
2674   VPBasicBlock *getEntry() { return Entry; }
2675   const VPBasicBlock *getEntry() const { return Entry; }
2676 
2677   /// The trip count of the original loop.
2678   VPValue *getTripCount() const {
2679     assert(TripCount && "trip count needs to be set before accessing it");
2680     return TripCount;
2681   }
2682 
2683   /// The backedge taken count of the original loop.
2684   VPValue *getOrCreateBackedgeTakenCount() {
2685     if (!BackedgeTakenCount)
2686       BackedgeTakenCount = new VPValue();
2687     return BackedgeTakenCount;
2688   }
2689 
2690   /// The vector trip count.
2691   VPValue &getVectorTripCount() { return VectorTripCount; }
2692 
2693   /// Returns VF * UF of the vector loop region.
2694   VPValue &getVFxUF() { return VFxUF; }
2695 
2696   /// Mark the plan to indicate that using Value2VPValue is not safe any
2697   /// longer, because it may be stale.
2698   void disableValue2VPValue() { Value2VPValueEnabled = false; }
2699 
2700   void addVF(ElementCount VF) { VFs.insert(VF); }
2701 
2702   void setVF(ElementCount VF) {
2703     assert(hasVF(VF) && "Cannot set VF not already in plan");
2704     VFs.clear();
2705     VFs.insert(VF);
2706   }
2707 
2708   bool hasVF(ElementCount VF) { return VFs.count(VF); }
2709 
2710   bool hasScalarVFOnly() const { return VFs.size() == 1 && VFs[0].isScalar(); }
2711 
2712   bool hasUF(unsigned UF) const { return UFs.empty() || UFs.contains(UF); }
2713 
2714   void setUF(unsigned UF) {
2715     assert(hasUF(UF) && "Cannot set the UF not already in plan");
2716     UFs.clear();
2717     UFs.insert(UF);
2718   }
2719 
2720   /// Return a string with the name of the plan and the applicable VFs and UFs.
2721   std::string getName() const;
2722 
2723   void setName(const Twine &newName) { Name = newName.str(); }
2724 
2725   void addVPValue(Value *V, VPValue *VPV) {
2726     assert((Value2VPValueEnabled || VPV->isLiveIn()) &&
2727            "Value2VPValue mapping may be out of date!");
2728     assert(V && "Trying to add a null Value to VPlan");
2729     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2730     Value2VPValue[V] = VPV;
2731   }
2732 
2733   /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
2734   /// checking whether it is safe to query VPValues using IR Values.
2735   VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
2736     assert(V && "Trying to get the VPValue of a null Value");
2737     assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2738     assert((Value2VPValueEnabled || OverrideAllowed ||
2739             Value2VPValue[V]->isLiveIn()) &&
2740            "Value2VPValue mapping may be out of date!");
2741     return Value2VPValue[V];
2742   }
2743 
2744   /// Gets the VPValue for \p V or adds a new live-in (if none exists yet) for
2745   /// \p V.
2746   VPValue *getVPValueOrAddLiveIn(Value *V) {
2747     assert(V && "Trying to get or add the VPValue of a null Value");
2748     if (!Value2VPValue.count(V)) {
2749       VPValue *VPV = new VPValue(V);
2750       VPLiveInsToFree.push_back(VPV);
2751       addVPValue(V, VPV);
2752     }
2753 
2754     return getVPValue(V);
2755   }
2756 
2757 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2758   /// Print the live-ins of this VPlan to \p O.
2759   void printLiveIns(raw_ostream &O) const;
2760 
2761   /// Print this VPlan to \p O.
2762   void print(raw_ostream &O) const;
2763 
2764   /// Print this VPlan in DOT format to \p O.
2765   void printDOT(raw_ostream &O) const;
2766 
2767   /// Dump the plan to stderr (for debugging).
2768   LLVM_DUMP_METHOD void dump() const;
2769 #endif
2770 
2771   /// Returns a range mapping the values in the range \p Operands to their
2772   /// corresponding VPValues.
2773   iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2774   mapToVPValues(User::op_range Operands) {
2775     std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2776       return getVPValueOrAddLiveIn(Op);
2777     };
2778     return map_range(Operands, Fn);
2779   }
2780 
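  // Illustrative only: collecting plan-level operands for all IR operands of
  // an instruction in one pass (Plan and Inst are hypothetical).
  //
  //   SmallVector<VPValue *, 4> Ops;
  //   for (VPValue *Op : Plan.mapToVPValues(Inst->operands()))
  //     Ops.push_back(Op);
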
  /// Returns the VPRegionBlock of the vector loop.
  VPRegionBlock *getVectorLoopRegion() {
    return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
  }
  const VPRegionBlock *getVectorLoopRegion() const {
    return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
  }

  /// Returns the canonical induction recipe of the vector loop.
  VPCanonicalIVPHIRecipe *getCanonicalIV() {
    VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
    if (EntryVPBB->empty()) {
      // VPlan native path.
      EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
    }
    return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
  }

  void addLiveOut(PHINode *PN, VPValue *V);

  void removeLiveOut(PHINode *PN) {
    delete LiveOuts[PN];
    LiveOuts.erase(PN);
  }

  const MapVector<PHINode *, VPLiveOut *> &getLiveOuts() const {
    return LiveOuts;
  }

  VPValue *getSCEVExpansion(const SCEV *S) const {
    return SCEVToExpansion.lookup(S);
  }

  void addSCEVExpansion(const SCEV *S, VPValue *V) {
    assert(!SCEVToExpansion.contains(S) && "SCEV already expanded");
    SCEVToExpansion[S] = V;
  }

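  // Illustrative only: the get-then-add discipline for the expansion cache;
  // addSCEVExpansion asserts on duplicates (Plan, Expr and ExpandedV are
  // hypothetical).
  //
  //   if (!Plan.getSCEVExpansion(Expr))
  //     Plan.addSCEVExpansion(Expr, ExpandedV);
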
  /// \return The block corresponding to the original preheader.
  VPBasicBlock *getPreheader() { return Preheader; }
  const VPBasicBlock *getPreheader() const { return Preheader; }

private:
  /// Add to the given dominator tree the header block and every new basic
  /// block that was created between it and the latch block, inclusive.
  static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
                                  BasicBlock *LoopPreHeaderBB,
                                  BasicBlock *LoopExitBB);
};

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// VPlanPrinter prints a given VPlan to a given output stream. The printing is
/// indented and follows the dot format.
class VPlanPrinter {
  raw_ostream &OS;
  const VPlan &Plan;
  unsigned Depth = 0;
  unsigned TabWidth = 2;
  std::string Indent;
  unsigned BID = 0;
  SmallDenseMap<const VPBlockBase *, unsigned> BlockID;

  VPSlotTracker SlotTracker;

  /// Handle indentation.
  void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }

  /// Print a given \p Block of the Plan.
  void dumpBlock(const VPBlockBase *Block);

  /// Print the information related to the CFG edges going out of a given
  /// \p Block, followed by printing the successor blocks themselves.
  void dumpEdges(const VPBlockBase *Block);

  /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
  /// its successor blocks.
  void dumpBasicBlock(const VPBasicBlock *BasicBlock);

  /// Print a given \p Region of the Plan.
  void dumpRegion(const VPRegionBlock *Region);

  unsigned getOrCreateBID(const VPBlockBase *Block) {
    return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
  }

  Twine getOrCreateName(const VPBlockBase *Block);

  Twine getUID(const VPBlockBase *Block);

  /// Print the information related to a CFG edge between two VPBlockBases.
  void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
                const Twine &Label);

public:
  VPlanPrinter(raw_ostream &O, const VPlan &P)
      : OS(O), Plan(P), SlotTracker(&P) {}

  LLVM_DUMP_METHOD void dump();
};

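// Illustrative only: printing the DOT form of a plan to the debug stream,
// mirroring what VPlan::printDOT does internally (Plan is hypothetical).
//
//   VPlanPrinter Printer(dbgs(), Plan);
//   Printer.dump();
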
struct VPlanIngredient {
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  void print(raw_ostream &O) const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}

inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
#endif

//===----------------------------------------------------------------------===//
// VPlan Utilities
//===----------------------------------------------------------------------===//

/// Class that provides utilities for VPBlockBases in VPlan.
class VPBlockUtils {
public:
  VPBlockUtils() = delete;

  /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
  /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
  /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. \p BlockPtr's
  /// successors are moved from \p BlockPtr to \p NewBlock. \p NewBlock must
  /// have neither successors nor predecessors.
  static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
    assert(NewBlock->getSuccessors().empty() &&
           NewBlock->getPredecessors().empty() &&
           "Can't insert new block with predecessors or successors.");
    NewBlock->setParent(BlockPtr->getParent());
    SmallVector<VPBlockBase *> Succs(BlockPtr->successors());
    for (VPBlockBase *Succ : Succs) {
      disconnectBlocks(BlockPtr, Succ);
      connectBlocks(NewBlock, Succ);
    }
    connectBlocks(BlockPtr, NewBlock);
  }

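  // Illustrative only: splicing a fresh block right after VPBB; existing
  // edges VPBB->Succ become New->Succ and a single VPBB->New edge is added
  // (VPBB is hypothetical).
  //
  //   VPBasicBlock *New = new VPBasicBlock("new.block");
  //   VPBlockUtils::insertBlockAfter(New, VPBB);
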
  /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
  /// BlockPtr. Add \p IfTrue and \p IfFalse as successors of \p BlockPtr and \p
  /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
  /// parent to \p IfTrue and \p IfFalse. \p BlockPtr must have no successors
  /// and \p IfTrue and \p IfFalse must have neither successors nor
  /// predecessors.
  static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
                                   VPBlockBase *BlockPtr) {
    assert(IfTrue->getSuccessors().empty() &&
           "Can't insert IfTrue with successors.");
    assert(IfFalse->getSuccessors().empty() &&
           "Can't insert IfFalse with successors.");
    BlockPtr->setTwoSuccessors(IfTrue, IfFalse);
    IfTrue->setPredecessors({BlockPtr});
    IfFalse->setPredecessors({BlockPtr});
    IfTrue->setParent(BlockPtr->getParent());
    IfFalse->setParent(BlockPtr->getParent());
  }

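  // Illustrative only: forming an if-then-else diamond below a condition
  // block and rejoining at a merge block with the same parent (CondVPBB and
  // MergeVPBB are hypothetical).
  //
  //   VPBasicBlock *Then = new VPBasicBlock("then");
  //   VPBasicBlock *Else = new VPBasicBlock("else");
  //   VPBlockUtils::insertTwoBlocksAfter(Then, Else, CondVPBB);
  //   VPBlockUtils::connectBlocks(Then, MergeVPBB);
  //   VPBlockUtils::connectBlocks(Else, MergeVPBB);
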
  /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
  /// the successors of \p From and \p From to the predecessors of \p To. Both
  /// VPBlockBases must have the same parent, which can be null. Both
  /// VPBlockBases can be already connected to other VPBlockBases.
  static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert((From->getParent() == To->getParent()) &&
           "Can't connect two blocks with different parents");
    assert(From->getNumSuccessors() < 2 &&
           "Blocks can't have more than two successors.");
    From->appendSuccessor(To);
    To->appendPredecessor(From);
  }

  /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
  /// from the successors of \p From and \p From from the predecessors of \p To.
  static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert(To && "Successor to disconnect is null.");
    From->removeSuccessor(To);
    To->removePredecessor(From);
  }

  /// Return an iterator range over \p Range which only includes \p BlockTy
  /// blocks. The accesses are cast to \p BlockTy.
  template <typename BlockTy, typename T>
  static auto blocksOnly(const T &Range) {
    // Create BaseTy with correct const-ness based on BlockTy.
    using BaseTy = std::conditional_t<std::is_const<BlockTy>::value,
                                      const VPBlockBase, VPBlockBase>;

    // We need to first create an iterator range over (const) BlockTy & instead
    // of (const) BlockTy * for filter_range to work properly.
    auto Mapped =
        map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
    auto Filter = make_filter_range(
        Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
    return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
      return cast<BlockTy>(&Block);
    });
  }
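
  // Illustrative only: visiting just the VPBasicBlocks among a block's
  // successors, skipping any VPRegionBlocks (Block is hypothetical).
  //
  //   for (VPBasicBlock *VPBB :
  //        VPBlockUtils::blocksOnly<VPBasicBlock>(Block->getSuccessors()))
  //     VPBB->setName("visited");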
};

class VPInterleavedAccessInfo {
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups.
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to such a group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};

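// Illustrative only: querying group membership for a planned memory access;
// a null result means the access is not interleaved (VPIAI and MemInstr are
// hypothetical).
//
//   if (auto *IG = VPIAI.getInterleaveGroup(MemInstr)) {
//     unsigned Factor = IG->getFactor();
//     (void)Factor; // e.g. feed the cost model
//   }
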
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we have completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are currently building a multi node.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instructions can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};

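// Illustrative only: a hedged sketch of driving SLP-style combining from a
// bundle of seed values (VPIAI, VPBB, Seeds and useCombinedTree are
// hypothetical).
//
//   VPlanSlp Slp(VPIAI, *VPBB);
//   VPInstruction *Root = Slp.buildGraph(Seeds);
//   if (Root && Slp.isCompletelySLP())
//     useCombinedTree(Root, Slp.getWidestBundleBits());
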
namespace vputils {

/// Returns true if only the first lane of \p Def is used.
bool onlyFirstLaneUsed(VPValue *Def);

/// Returns true if only the first part of \p Def is used.
bool onlyFirstPartUsed(VPValue *Def);

/// Get or create a VPValue that corresponds to the expansion of \p Expr. If \p
/// Expr is a SCEVConstant or SCEVUnknown, return a VPValue wrapping the live-in
/// value. Otherwise return a VPExpandSCEVRecipe to expand \p Expr. If \p Plan's
/// pre-header already contains a recipe expanding \p Expr, return it. If not,
/// create a new one.
VPValue *getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
                                       ScalarEvolution &SE);

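// Illustrative only: expanding a loop-invariant stride expression so recipes
// can use it as an operand (Plan, StrideSCEV and SE are hypothetical).
//
//   VPValue *Stride =
//       vputils::getOrCreateVPValueForSCEVExpr(Plan, StrideSCEV, SE);
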
/// Returns true if \p VPV is uniform after vectorization.
inline bool isUniformAfterVectorization(VPValue *VPV) {
  // A value defined outside the vector region must be uniform after
  // vectorization inside a vector region.
  if (VPV->isDefinedOutsideVectorRegions())
    return true;
  VPRecipeBase *Def = VPV->getDefiningRecipe();
  assert(Def && "Must have definition for value defined inside vector region");
  if (auto *Rep = dyn_cast<VPReplicateRecipe>(Def))
    return Rep->isUniform();
  if (auto *GEP = dyn_cast<VPWidenGEPRecipe>(Def))
    return all_of(GEP->operands(), isUniformAfterVectorization);
  if (auto *VPI = dyn_cast<VPInstruction>(Def))
    return VPI->getOpcode() == VPInstruction::ComputeReductionResult;
  return false;
}
} // end namespace vputils

} // end namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H