xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h (revision a4a491e2238b12ccd64d3faf9e6401487f6f1f1b)
1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 ///    VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 ///    treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 ///    within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 ///    instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24 
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27 
28 #include "VPlanValue.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/DepthFirstIterator.h"
31 #include "llvm/ADT/GraphTraits.h"
32 #include "llvm/ADT/MapVector.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallVector.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/ADT/ilist.h"
39 #include "llvm/ADT/ilist_node.h"
40 #include "llvm/Analysis/LoopInfo.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/FMF.h"
44 #include "llvm/Transforms/Utils/LoopVersioning.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstddef>
48 #include <string>
49 
50 namespace llvm {
51 
52 class BasicBlock;
53 class DominatorTree;
54 class InductionDescriptor;
55 class InnerLoopVectorizer;
56 class IRBuilderBase;
57 class LoopInfo;
58 class raw_ostream;
59 class RecurrenceDescriptor;
60 class Value;
61 class VPBasicBlock;
62 class VPRegionBlock;
63 class VPlan;
64 class VPReplicateRecipe;
65 class VPlanSlp;
66 
67 /// Returns a calculation for the total number of elements for a given \p VF.
68 /// For fixed width vectors this value is a constant, whereas for scalable
69 /// vectors it is an expression determined at runtime.
70 Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF);
71 
72 /// Return a value for Step multiplied by VF.
73 Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
74                        int64_t Step);
75 
76 /// A range of powers-of-2 vectorization factors with fixed start and
77 /// adjustable end. The range includes start and excludes end, e.g.,:
78 /// [1, 9) = {1, 2, 4, 8}
79 struct VFRange {
80   // A power of 2.
81   const ElementCount Start;
82 
83   // Need not be a power of 2. If End <= Start range is empty.
84   ElementCount End;
85 
86   bool isEmpty() const {
87     return End.getKnownMinValue() <= Start.getKnownMinValue();
88   }
89 
90   VFRange(const ElementCount &Start, const ElementCount &End)
91       : Start(Start), End(End) {
92     assert(Start.isScalable() == End.isScalable() &&
93            "Both Start and End should have the same scalable flag");
94     assert(isPowerOf2_32(Start.getKnownMinValue()) &&
95            "Expected Start to be a power of 2");
96   }
97 };
98 
99 using VPlanPtr = std::unique_ptr<VPlan>;
100 
101 /// In what follows, the term "input IR" refers to code that is fed into the
102 /// vectorizer whereas the term "output IR" refers to code that is generated by
103 /// the vectorizer.
104 
105 /// VPLane provides a way to access lanes in both fixed width and scalable
106 /// vectors, where for the latter the lane index sometimes needs calculating
107 /// as a runtime expression.
108 class VPLane {
109 public:
110   /// Kind describes how to interpret Lane.
111   enum class Kind : uint8_t {
112     /// For First, Lane is the index into the first N elements of a
113     /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
114     First,
115     /// For ScalableLast, Lane is the offset from the start of the last
116     /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
117     /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
118     /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
119     ScalableLast
120   };
121 
122 private:
123   /// in [0..VF)
124   unsigned Lane;
125 
126   /// Indicates how the Lane should be interpreted, as described above.
127   Kind LaneKind;
128 
129 public:
130   VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
131 
132   static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
133 
134   static VPLane getLastLaneForVF(const ElementCount &VF) {
135     unsigned LaneOffset = VF.getKnownMinValue() - 1;
136     Kind LaneKind;
137     if (VF.isScalable())
138       // In this case 'LaneOffset' refers to the offset from the start of the
139       // last subvector with VF.getKnownMinValue() elements.
140       LaneKind = VPLane::Kind::ScalableLast;
141     else
142       LaneKind = VPLane::Kind::First;
143     return VPLane(LaneOffset, LaneKind);
144   }
145 
146   /// Returns a compile-time known value for the lane index and asserts if the
147   /// lane can only be calculated at runtime.
148   unsigned getKnownLane() const {
149     assert(LaneKind == Kind::First);
150     return Lane;
151   }
152 
153   /// Returns an expression describing the lane index that can be used at
154   /// runtime.
155   Value *getAsRuntimeExpr(IRBuilderBase &Builder, const ElementCount &VF) const;
156 
157   /// Returns the Kind of lane offset.
158   Kind getKind() const { return LaneKind; }
159 
160   /// Returns true if this is the first lane of the whole vector.
161   bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
162 
163   /// Maps the lane to a cache index based on \p VF.
164   unsigned mapToCacheIndex(const ElementCount &VF) const {
165     switch (LaneKind) {
166     case VPLane::Kind::ScalableLast:
167       assert(VF.isScalable() && Lane < VF.getKnownMinValue());
168       return VF.getKnownMinValue() + Lane;
169     default:
170       assert(Lane < VF.getKnownMinValue());
171       return Lane;
172     }
173   }
174 
175   /// Returns the maxmimum number of lanes that we are able to consider
176   /// caching for \p VF.
177   static unsigned getNumCachedLanes(const ElementCount &VF) {
178     return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
179   }
180 };
181 
182 /// VPIteration represents a single point in the iteration space of the output
183 /// (vectorized and/or unrolled) IR loop.
184 struct VPIteration {
185   /// in [0..UF)
186   unsigned Part;
187 
188   VPLane Lane;
189 
190   VPIteration(unsigned Part, unsigned Lane,
191               VPLane::Kind Kind = VPLane::Kind::First)
192       : Part(Part), Lane(Lane, Kind) {}
193 
194   VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
195 
196   bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
197 };
198 
199 /// VPTransformState holds information passed down when "executing" a VPlan,
200 /// needed for generating the output IR.
201 struct VPTransformState {
202   VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
203                    DominatorTree *DT, IRBuilderBase &Builder,
204                    InnerLoopVectorizer *ILV, VPlan *Plan)
205       : VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan),
206         LVer(nullptr) {}
207 
208   /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
209   ElementCount VF;
210   unsigned UF;
211 
212   /// Hold the indices to generate specific scalar instructions. Null indicates
213   /// that all instances are to be generated, using either scalar or vector
214   /// instructions.
215   Optional<VPIteration> Instance;
216 
217   struct DataState {
218     /// A type for vectorized values in the new loop. Each value from the
219     /// original loop, when vectorized, is represented by UF vector values in
220     /// the new unrolled loop, where UF is the unroll factor.
221     typedef SmallVector<Value *, 2> PerPartValuesTy;
222 
223     DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
224 
225     using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
226     DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
227   } Data;
228 
229   /// Get the generated Value for a given VPValue and a given Part. Note that
230   /// as some Defs are still created by ILV and managed in its ValueMap, this
231   /// method will delegate the call to ILV in such cases in order to provide
232   /// callers a consistent API.
233   /// \see set.
234   Value *get(VPValue *Def, unsigned Part);
235 
236   /// Get the generated Value for a given VPValue and given Part and Lane.
237   Value *get(VPValue *Def, const VPIteration &Instance);
238 
239   bool hasVectorValue(VPValue *Def, unsigned Part) {
240     auto I = Data.PerPartOutput.find(Def);
241     return I != Data.PerPartOutput.end() && Part < I->second.size() &&
242            I->second[Part];
243   }
244 
245   bool hasAnyVectorValue(VPValue *Def) const {
246     return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
247   }
248 
249   bool hasScalarValue(VPValue *Def, VPIteration Instance) {
250     auto I = Data.PerPartScalars.find(Def);
251     if (I == Data.PerPartScalars.end())
252       return false;
253     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
254     return Instance.Part < I->second.size() &&
255            CacheIdx < I->second[Instance.Part].size() &&
256            I->second[Instance.Part][CacheIdx];
257   }
258 
259   /// Set the generated Value for a given VPValue and a given Part.
260   void set(VPValue *Def, Value *V, unsigned Part) {
261     if (!Data.PerPartOutput.count(Def)) {
262       DataState::PerPartValuesTy Entry(UF);
263       Data.PerPartOutput[Def] = Entry;
264     }
265     Data.PerPartOutput[Def][Part] = V;
266   }
267   /// Reset an existing vector value for \p Def and a given \p Part.
268   void reset(VPValue *Def, Value *V, unsigned Part) {
269     auto Iter = Data.PerPartOutput.find(Def);
270     assert(Iter != Data.PerPartOutput.end() &&
271            "need to overwrite existing value");
272     Iter->second[Part] = V;
273   }
274 
275   /// Set the generated scalar \p V for \p Def and the given \p Instance.
276   void set(VPValue *Def, Value *V, const VPIteration &Instance) {
277     auto Iter = Data.PerPartScalars.insert({Def, {}});
278     auto &PerPartVec = Iter.first->second;
279     while (PerPartVec.size() <= Instance.Part)
280       PerPartVec.emplace_back();
281     auto &Scalars = PerPartVec[Instance.Part];
282     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
283     while (Scalars.size() <= CacheIdx)
284       Scalars.push_back(nullptr);
285     assert(!Scalars[CacheIdx] && "should overwrite existing value");
286     Scalars[CacheIdx] = V;
287   }
288 
289   /// Reset an existing scalar value for \p Def and a given \p Instance.
290   void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
291     auto Iter = Data.PerPartScalars.find(Def);
292     assert(Iter != Data.PerPartScalars.end() &&
293            "need to overwrite existing value");
294     assert(Instance.Part < Iter->second.size() &&
295            "need to overwrite existing value");
296     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
297     assert(CacheIdx < Iter->second[Instance.Part].size() &&
298            "need to overwrite existing value");
299     Iter->second[Instance.Part][CacheIdx] = V;
300   }
301 
302   /// Add additional metadata to \p To that was not present on \p Orig.
303   ///
304   /// Currently this is used to add the noalias annotations based on the
305   /// inserted memchecks.  Use this for instructions that are *cloned* into the
306   /// vector loop.
307   void addNewMetadata(Instruction *To, const Instruction *Orig);
308 
309   /// Add metadata from one instruction to another.
310   ///
311   /// This includes both the original MDs from \p From and additional ones (\see
312   /// addNewMetadata).  Use this for *newly created* instructions in the vector
313   /// loop.
314   void addMetadata(Instruction *To, Instruction *From);
315 
316   /// Similar to the previous function but it adds the metadata to a
317   /// vector of instructions.
318   void addMetadata(ArrayRef<Value *> To, Instruction *From);
319 
320   /// Set the debug location in the builder using the debug location in \p V.
321   void setDebugLocFromInst(const Value *V);
322 
323   /// Hold state information used when constructing the CFG of the output IR,
324   /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
325   struct CFGState {
326     /// The previous VPBasicBlock visited. Initially set to null.
327     VPBasicBlock *PrevVPBB = nullptr;
328 
329     /// The previous IR BasicBlock created or used. Initially set to the new
330     /// header BasicBlock.
331     BasicBlock *PrevBB = nullptr;
332 
333     /// The last IR BasicBlock in the output IR. Set to the exit block of the
334     /// vector loop.
335     BasicBlock *ExitBB = nullptr;
336 
337     /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
338     /// of replication, maps the BasicBlock of the last replica created.
339     SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
340 
341     CFGState() = default;
342 
343     /// Returns the BasicBlock* mapped to the pre-header of the loop region
344     /// containing \p R.
345     BasicBlock *getPreheaderBBFor(VPRecipeBase *R);
346   } CFG;
347 
348   /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
349   LoopInfo *LI;
350 
351   /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
352   DominatorTree *DT;
353 
354   /// Hold a reference to the IRBuilder used to generate output IR code.
355   IRBuilderBase &Builder;
356 
357   VPValue2ValueTy VPValue2Value;
358 
359   /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
360   Value *CanonicalIV = nullptr;
361 
362   /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
363   InnerLoopVectorizer *ILV;
364 
365   /// Pointer to the VPlan code is generated for.
366   VPlan *Plan;
367 
368   /// Holds recipes that may generate a poison value that is used after
369   /// vectorization, even when their operands are not poison.
370   SmallPtrSet<VPRecipeBase *, 16> MayGeneratePoisonRecipes;
371 
372   /// The loop object for the current parent region, or nullptr.
373   Loop *CurrentVectorLoop = nullptr;
374 
375   /// LoopVersioning.  It's only set up (non-null) if memchecks were
376   /// used.
377   ///
378   /// This is currently only used to add no-alias metadata based on the
379   /// memchecks.  The actually versioning is performed manually.
380   std::unique_ptr<LoopVersioning> LVer;
381 };
382 
383 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
384 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
385 class VPBlockBase {
386   friend class VPBlockUtils;
387 
388   const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
389 
390   /// An optional name for the block.
391   std::string Name;
392 
393   /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
394   /// it is a topmost VPBlockBase.
395   VPRegionBlock *Parent = nullptr;
396 
397   /// List of predecessor blocks.
398   SmallVector<VPBlockBase *, 1> Predecessors;
399 
400   /// List of successor blocks.
401   SmallVector<VPBlockBase *, 1> Successors;
402 
403   /// VPlan containing the block. Can only be set on the entry block of the
404   /// plan.
405   VPlan *Plan = nullptr;
406 
407   /// Add \p Successor as the last successor to this block.
408   void appendSuccessor(VPBlockBase *Successor) {
409     assert(Successor && "Cannot add nullptr successor!");
410     Successors.push_back(Successor);
411   }
412 
413   /// Add \p Predecessor as the last predecessor to this block.
414   void appendPredecessor(VPBlockBase *Predecessor) {
415     assert(Predecessor && "Cannot add nullptr predecessor!");
416     Predecessors.push_back(Predecessor);
417   }
418 
419   /// Remove \p Predecessor from the predecessors of this block.
420   void removePredecessor(VPBlockBase *Predecessor) {
421     auto Pos = find(Predecessors, Predecessor);
422     assert(Pos && "Predecessor does not exist");
423     Predecessors.erase(Pos);
424   }
425 
426   /// Remove \p Successor from the successors of this block.
427   void removeSuccessor(VPBlockBase *Successor) {
428     auto Pos = find(Successors, Successor);
429     assert(Pos && "Successor does not exist");
430     Successors.erase(Pos);
431   }
432 
433 protected:
434   VPBlockBase(const unsigned char SC, const std::string &N)
435       : SubclassID(SC), Name(N) {}
436 
437 public:
438   /// An enumeration for keeping track of the concrete subclass of VPBlockBase
439   /// that are actually instantiated. Values of this enumeration are kept in the
440   /// SubclassID field of the VPBlockBase objects. They are used for concrete
441   /// type identification.
442   using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
443 
444   using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
445 
446   virtual ~VPBlockBase() = default;
447 
448   const std::string &getName() const { return Name; }
449 
450   void setName(const Twine &newName) { Name = newName.str(); }
451 
452   /// \return an ID for the concrete type of this object.
453   /// This is used to implement the classof checks. This should not be used
454   /// for any other purpose, as the values may change as LLVM evolves.
455   unsigned getVPBlockID() const { return SubclassID; }
456 
457   VPRegionBlock *getParent() { return Parent; }
458   const VPRegionBlock *getParent() const { return Parent; }
459 
460   /// \return A pointer to the plan containing the current block.
461   VPlan *getPlan();
462   const VPlan *getPlan() const;
463 
464   /// Sets the pointer of the plan containing the block. The block must be the
465   /// entry block into the VPlan.
466   void setPlan(VPlan *ParentPlan);
467 
468   void setParent(VPRegionBlock *P) { Parent = P; }
469 
470   /// \return the VPBasicBlock that is the entry of this VPBlockBase,
471   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
472   /// VPBlockBase is a VPBasicBlock, it is returned.
473   const VPBasicBlock *getEntryBasicBlock() const;
474   VPBasicBlock *getEntryBasicBlock();
475 
476   /// \return the VPBasicBlock that is the exiting this VPBlockBase,
477   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
478   /// VPBlockBase is a VPBasicBlock, it is returned.
479   const VPBasicBlock *getExitingBasicBlock() const;
480   VPBasicBlock *getExitingBasicBlock();
481 
482   const VPBlocksTy &getSuccessors() const { return Successors; }
483   VPBlocksTy &getSuccessors() { return Successors; }
484 
485   iterator_range<VPBlockBase **> successors() { return Successors; }
486 
487   const VPBlocksTy &getPredecessors() const { return Predecessors; }
488   VPBlocksTy &getPredecessors() { return Predecessors; }
489 
490   /// \return the successor of this VPBlockBase if it has a single successor.
491   /// Otherwise return a null pointer.
492   VPBlockBase *getSingleSuccessor() const {
493     return (Successors.size() == 1 ? *Successors.begin() : nullptr);
494   }
495 
496   /// \return the predecessor of this VPBlockBase if it has a single
497   /// predecessor. Otherwise return a null pointer.
498   VPBlockBase *getSinglePredecessor() const {
499     return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
500   }
501 
502   size_t getNumSuccessors() const { return Successors.size(); }
503   size_t getNumPredecessors() const { return Predecessors.size(); }
504 
505   /// An Enclosing Block of a block B is any block containing B, including B
506   /// itself. \return the closest enclosing block starting from "this", which
507   /// has successors. \return the root enclosing block if all enclosing blocks
508   /// have no successors.
509   VPBlockBase *getEnclosingBlockWithSuccessors();
510 
511   /// \return the closest enclosing block starting from "this", which has
512   /// predecessors. \return the root enclosing block if all enclosing blocks
513   /// have no predecessors.
514   VPBlockBase *getEnclosingBlockWithPredecessors();
515 
516   /// \return the successors either attached directly to this VPBlockBase or, if
517   /// this VPBlockBase is the exit block of a VPRegionBlock and has no
518   /// successors of its own, search recursively for the first enclosing
519   /// VPRegionBlock that has successors and return them. If no such
520   /// VPRegionBlock exists, return the (empty) successors of the topmost
521   /// VPBlockBase reached.
522   const VPBlocksTy &getHierarchicalSuccessors() {
523     return getEnclosingBlockWithSuccessors()->getSuccessors();
524   }
525 
526   /// \return the hierarchical successor of this VPBlockBase if it has a single
527   /// hierarchical successor. Otherwise return a null pointer.
528   VPBlockBase *getSingleHierarchicalSuccessor() {
529     return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
530   }
531 
532   /// \return the predecessors either attached directly to this VPBlockBase or,
533   /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
534   /// predecessors of its own, search recursively for the first enclosing
535   /// VPRegionBlock that has predecessors and return them. If no such
536   /// VPRegionBlock exists, return the (empty) predecessors of the topmost
537   /// VPBlockBase reached.
538   const VPBlocksTy &getHierarchicalPredecessors() {
539     return getEnclosingBlockWithPredecessors()->getPredecessors();
540   }
541 
542   /// \return the hierarchical predecessor of this VPBlockBase if it has a
543   /// single hierarchical predecessor. Otherwise return a null pointer.
544   VPBlockBase *getSingleHierarchicalPredecessor() {
545     return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
546   }
547 
548   /// Set a given VPBlockBase \p Successor as the single successor of this
549   /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
550   /// This VPBlockBase must have no successors.
551   void setOneSuccessor(VPBlockBase *Successor) {
552     assert(Successors.empty() && "Setting one successor when others exist.");
553     appendSuccessor(Successor);
554   }
555 
556   /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
557   /// successors of this VPBlockBase. This VPBlockBase is not added as
558   /// predecessor of \p IfTrue or \p IfFalse. This VPBlockBase must have no
559   /// successors.
560   void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse) {
561     assert(Successors.empty() && "Setting two successors when others exist.");
562     appendSuccessor(IfTrue);
563     appendSuccessor(IfFalse);
564   }
565 
566   /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
567   /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
568   /// as successor of any VPBasicBlock in \p NewPreds.
569   void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
570     assert(Predecessors.empty() && "Block predecessors already set.");
571     for (auto *Pred : NewPreds)
572       appendPredecessor(Pred);
573   }
574 
575   /// Remove all the predecessor of this block.
576   void clearPredecessors() { Predecessors.clear(); }
577 
578   /// Remove all the successors of this block.
579   void clearSuccessors() { Successors.clear(); }
580 
581   /// The method which generates the output IR that correspond to this
582   /// VPBlockBase, thereby "executing" the VPlan.
583   virtual void execute(struct VPTransformState *State) = 0;
584 
585   /// Delete all blocks reachable from a given VPBlockBase, inclusive.
586   static void deleteCFG(VPBlockBase *Entry);
587 
588   /// Return true if it is legal to hoist instructions into this block.
589   bool isLegalToHoistInto() {
590     // There are currently no constraints that prevent an instruction to be
591     // hoisted into a VPBlockBase.
592     return true;
593   }
594 
595   /// Replace all operands of VPUsers in the block with \p NewValue and also
596   /// replaces all uses of VPValues defined in the block with NewValue.
597   virtual void dropAllReferences(VPValue *NewValue) = 0;
598 
599 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
600   void printAsOperand(raw_ostream &OS, bool PrintType) const {
601     OS << getName();
602   }
603 
604   /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
605   /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
606   /// consequtive numbers.
607   ///
608   /// Note that the numbering is applied to the whole VPlan, so printing
609   /// individual blocks is consistent with the whole VPlan printing.
610   virtual void print(raw_ostream &O, const Twine &Indent,
611                      VPSlotTracker &SlotTracker) const = 0;
612 
613   /// Print plain-text dump of this VPlan to \p O.
614   void print(raw_ostream &O) const {
615     VPSlotTracker SlotTracker(getPlan());
616     print(O, "", SlotTracker);
617   }
618 
619   /// Print the successors of this block to \p O, prefixing all lines with \p
620   /// Indent.
621   void printSuccessors(raw_ostream &O, const Twine &Indent) const;
622 
623   /// Dump this VPBlockBase to dbgs().
624   LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
625 #endif
626 };
627 
628 /// A value that is used outside the VPlan. The operand of the user needs to be
629 /// added to the associated LCSSA phi node.
630 class VPLiveOut : public VPUser {
631   PHINode *Phi;
632 
633 public:
634   VPLiveOut(PHINode *Phi, VPValue *Op)
635       : VPUser({Op}, VPUser::VPUserID::LiveOut), Phi(Phi) {}
636 
637   /// Fixup the wrapped LCSSA phi node in the unique exit block.  This simply
638   /// means we need to add the appropriate incoming value from the middle
639   /// block as exiting edges from the scalar epilogue loop (if present) are
640   /// already in place, and we exit the vector loop exclusively to the middle
641   /// block.
642   void fixPhi(VPlan &Plan, VPTransformState &State);
643 
644   /// Returns true if the VPLiveOut uses scalars of operand \p Op.
645   bool usesScalars(const VPValue *Op) const override {
646     assert(is_contained(operands(), Op) &&
647            "Op must be an operand of the recipe");
648     return true;
649   }
650 
651   PHINode *getPhi() const { return Phi; }
652 };
653 
654 /// VPRecipeBase is a base class modeling a sequence of one or more output IR
655 /// instructions. VPRecipeBase owns the the VPValues it defines through VPDef
656 /// and is responsible for deleting its defined values. Single-value
657 /// VPRecipeBases that also inherit from VPValue must make sure to inherit from
658 /// VPRecipeBase before VPValue.
659 class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
660                      public VPDef,
661                      public VPUser {
662   friend VPBasicBlock;
663   friend class VPBlockUtils;
664 
665   /// Each VPRecipe belongs to a single VPBasicBlock.
666   VPBasicBlock *Parent = nullptr;
667 
668 public:
669   VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
670       : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
671 
672   template <typename IterT>
673   VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
674       : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
675   virtual ~VPRecipeBase() = default;
676 
677   /// \return the VPBasicBlock which this VPRecipe belongs to.
678   VPBasicBlock *getParent() { return Parent; }
679   const VPBasicBlock *getParent() const { return Parent; }
680 
681   /// The method which generates the output IR instructions that correspond to
682   /// this VPRecipe, thereby "executing" the VPlan.
683   virtual void execute(struct VPTransformState &State) = 0;
684 
685   /// Insert an unlinked recipe into a basic block immediately before
686   /// the specified recipe.
687   void insertBefore(VPRecipeBase *InsertPos);
688   /// Insert an unlinked recipe into \p BB immediately before the insertion
689   /// point \p IP;
690   void insertBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator IP);
691 
692   /// Insert an unlinked Recipe into a basic block immediately after
693   /// the specified Recipe.
694   void insertAfter(VPRecipeBase *InsertPos);
695 
696   /// Unlink this recipe from its current VPBasicBlock and insert it into
697   /// the VPBasicBlock that MovePos lives in, right after MovePos.
698   void moveAfter(VPRecipeBase *MovePos);
699 
700   /// Unlink this recipe and insert into BB before I.
701   ///
702   /// \pre I is a valid iterator into BB.
703   void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);
704 
705   /// This method unlinks 'this' from the containing basic block, but does not
706   /// delete it.
707   void removeFromParent();
708 
709   /// This method unlinks 'this' from the containing basic block and deletes it.
710   ///
711   /// \returns an iterator pointing to the element after the erased one
712   iplist<VPRecipeBase>::iterator eraseFromParent();
713 
714   /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
715   /// otherwise.
716   Instruction *getUnderlyingInstr() {
717     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
718   }
719   const Instruction *getUnderlyingInstr() const {
720     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
721   }
722 
723   /// Method to support type inquiry through isa, cast, and dyn_cast.
724   static inline bool classof(const VPDef *D) {
725     // All VPDefs are also VPRecipeBases.
726     return true;
727   }
728 
729   static inline bool classof(const VPUser *U) {
730     return U->getVPUserID() == VPUser::VPUserID::Recipe;
731   }
732 
733   /// Returns true if the recipe may have side-effects.
734   bool mayHaveSideEffects() const;
735 
736   /// Returns true for PHI-like recipes.
737   bool isPhi() const {
738     return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
739   }
740 
741   /// Returns true if the recipe may read from memory.
742   bool mayReadFromMemory() const;
743 
744   /// Returns true if the recipe may write to memory.
745   bool mayWriteToMemory() const;
746 
747   /// Returns true if the recipe may read from or write to memory.
748   bool mayReadOrWriteMemory() const {
749     return mayReadFromMemory() || mayWriteToMemory();
750   }
751 };
752 
753 inline bool VPUser::classof(const VPDef *Def) {
754   return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
755          Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
756          Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
757          Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
758          Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
759          Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
760          Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
761          Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
762          Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
763          Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
764          Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
765 }
766 
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While as any Recipe it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
  enum {
    FirstOrderRecurrenceSplice =
        Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
                                      // values of a first-order recurrence.
    Not,
    ICmpULE,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
    CanonicalIVIncrement,
    CanonicalIVIncrementNUW,
    // The next two are similar to the above, but instead increment the
    // canonical IV separately for each unrolled part.
    CanonicalIVIncrementForPart,
    CanonicalIVIncrementForPartNUW,
    BranchOnCount,
    BranchOnCond
  };

private:
  typedef unsigned char OpcodeTy;
  /// Either an LLVM IR opcode or one of the VPlan-specific opcodes above.
  OpcodeTy Opcode;
  /// Fast-math flags applied via setFastMathFlags().
  FastMathFlags FMF;
  /// Debug location to attach to generated instructions.
  DebugLoc DL;

  /// An optional name that can be used for the generated IR instruction.
  const std::string Name;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction.
  void generateInstruction(VPTransformState &State, unsigned Part);

protected:
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands, DebugLoc DL,
                const Twine &Name = "")
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode),
        DL(DL), Name(Name.str()) {}

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
                DebugLoc DL = {}, const Twine &Name = "")
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands), DL, Name) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVInstructionSC;
  }

  /// Create a copy with the same opcode, operands, debug location and name.
  /// The clone is not inserted into any VPBasicBlock.
  VPInstruction *clone() const {
    SmallVector<VPValue *, 2> Operands(operands());
    return new VPInstruction(Opcode, Operands, DL, Name);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  /// Extra classof implementations to allow directly casting from VPUser ->
  /// VPInstruction.
  static inline bool classof(const VPUser *U) {
    auto *R = dyn_cast<VPRecipeBase>(U);
    return R && R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    //       modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
    case VPInstruction::BranchOnCond:
    case VPInstruction::BranchOnCount:
      return false;
    default:
      return true;
    }
  }

  /// Set the fast-math flags.
  void setFastMathFlags(FastMathFlags FMFNew);

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // Only the first operand of the listed opcodes is known to be used
    // per-first-lane; any other operand is conservatively reported as used
    // per-lane.
    if (getOperand(0) != Op)
      return false;
    switch (getOpcode()) {
    default:
      return false;
    case VPInstruction::ActiveLaneMask:
    case VPInstruction::CanonicalIVIncrement:
    case VPInstruction::CanonicalIVIncrementNUW:
    case VPInstruction::CanonicalIVIncrementForPart:
    case VPInstruction::CanonicalIVIncrementForPartNUW:
    case VPInstruction::BranchOnCount:
      return true;
    };
    llvm_unreachable("switch should return");
  }
};
916 
/// VPWidenRecipe is a recipe for producing a copy of vector type its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create a widening recipe for ingredient \p I with operands \p Operands;
  /// \p I also serves as the recipe's underlying value.
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
        VPValue(VPValue::VPVWidenSC, &I, this) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
946 
/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {

public:
  /// Create a widening recipe for call \p I; the operands are the call's
  /// arguments, and \p I serves as the recipe's underlying value.
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
      : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
        VPValue(VPValue::VPVWidenCallSC, &I, this) {}

  ~VPWidenCallRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
  }

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
972 
/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {

  /// Is the condition of the select loop invariant?
  bool InvariantCond;

public:
  /// Create a widening recipe for select \p I with operands \p Operands;
  /// \p InvariantCond records whether the condition is loop-invariant (and
  /// hence need not be widened).
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
                      bool InvariantCond)
      : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
        VPValue(VPValue::VPVWidenSelectSC, &I, this),
        InvariantCond(InvariantCond) {}

  ~VPWidenSelectRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
  }

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1003 
1004 /// A recipe for handling GEP instructions.
1005 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
1006   bool IsPtrLoopInvariant;
1007   SmallBitVector IsIndexLoopInvariant;
1008 
1009 public:
1010   template <typename IterT>
1011   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
1012       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1013         VPValue(VPWidenGEPSC, GEP, this),
1014         IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
1015 
1016   template <typename IterT>
1017   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
1018                    Loop *OrigLoop)
1019       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1020         VPValue(VPValue::VPVWidenGEPSC, GEP, this),
1021         IsIndexLoopInvariant(GEP->getNumIndices(), false) {
1022     IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
1023     for (auto Index : enumerate(GEP->indices()))
1024       IsIndexLoopInvariant[Index.index()] =
1025           OrigLoop->isLoopInvariant(Index.value().get());
1026   }
1027   ~VPWidenGEPRecipe() override = default;
1028 
1029   /// Method to support type inquiry through isa, cast, and dyn_cast.
1030   static inline bool classof(const VPDef *D) {
1031     return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
1032   }
1033 
1034   /// Generate the gep nodes.
1035   void execute(VPTransformState &State) override;
1036 
1037 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1038   /// Print the recipe.
1039   void print(raw_ostream &O, const Twine &Indent,
1040              VPSlotTracker &SlotTracker) const override;
1041 #endif
1042 };
1043 
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase, public VPValue {
  /// The original scalar induction phi.
  PHINode *IV;
  /// Descriptor with start, step and kind of the induction.
  const InductionDescriptor &IndDesc;
  /// Whether a vector phi needs to be created for the induction.
  bool NeedsVectorIV;

public:
  /// Create a recipe for \p IV with operands {Start, Step}; the recipe's
  /// VPValue is backed by \p IV itself.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
                                const InductionDescriptor &IndDesc,
                                bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start, Step}),
        VPValue(IV, this), IV(IV), IndDesc(IndDesc),
        NeedsVectorIV(NeedsVectorIV) {}

  /// As above, but the recipe's VPValue is backed by the truncation \p Trunc
  /// of the induction instead of the phi itself.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
                                const InductionDescriptor &IndDesc,
                                TruncInst *Trunc, bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start, Step}),
        VPValue(Trunc, this), IV(IV), IndDesc(IndDesc),
        NeedsVectorIV(NeedsVectorIV) {}

  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }
  const VPValue *getStartValue() const { return getOperand(0); }

  /// Returns the step value of the induction.
  VPValue *getStepValue() { return getOperand(1); }
  const VPValue *getStepValue() const { return getOperand(1); }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }

  PHINode *getPHINode() { return IV; }

  /// Returns the induction descriptor for the recipe.
  const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }

  /// Returns true if the induction is canonical, i.e. starting at 0 and
  /// incremented by UF * VF (= the original IV is incremented by 1).
  bool isCanonical() const;

  /// Returns the scalar type of the induction: the truncated type when the
  /// recipe models a truncation of the IV, the phi's type otherwise.
  const Type *getScalarType() const {
    const TruncInst *TruncI = getTruncInst();
    return TruncI ? TruncI->getType() : IV->getType();
  }

  /// Returns true if a vector phi needs to be created for the induction.
  bool needsVectorIV() const { return NeedsVectorIV; }
};
1118 
/// A pure virtual base class for all recipes modeling header phis, including
/// phis for first order recurrences, pointer inductions and reductions. The
/// start value is the first operand of the recipe and the incoming value from
/// the backedge is the second operand.
class VPHeaderPHIRecipe : public VPRecipeBase, public VPValue {
protected:
  /// Construct a header-phi recipe with the given VPValue/VPDef IDs for
  /// \p Phi; \p Start, when non-null, becomes operand 0.
  VPHeaderPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
                    VPValue *Start = nullptr)
      : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
    if (Start)
      addOperand(Start);
  }

public:
  ~VPHeaderPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast. Lists
  /// the IDs of all concrete header-phi recipes.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPCanonicalIVPHISC ||
           B->getVPDefID() == VPRecipeBase::VPActiveLaneMaskPHISC ||
           B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
           B->getVPDefID() == VPRecipeBase::VPReductionPHISC ||
           B->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC ||
           B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC ||
           V->getVPValueID() == VPValue::VPVActiveLaneMaskPHISC ||
           V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
           V->getVPValueID() == VPValue::VPVReductionPHISC ||
           V->getVPValueID() == VPValue::VPVWidenIntOrFpInductionSC ||
           V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi nodes.
  void execute(VPTransformState &State) override = 0;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override = 0;
#endif

  /// Returns the start value of the phi, if one is set.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }
  // NOTE(review): the const overload returns a non-const VPValue*; consider
  // returning const VPValue* for const-correctness.
  VPValue *getStartValue() const {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge. Requires the backedge
  /// value to have been added as the second operand already.
  VPValue *getBackedgeValue() {
    return getOperand(1);
  }

  /// Returns the backedge value as a recipe. The backedge value is guaranteed
  /// to be a recipe.
  VPRecipeBase *getBackedgeRecipe() {
    return cast<VPRecipeBase>(getBackedgeValue()->getDef());
  }
};
1181 
/// A recipe for handling phis of pointer inductions, producing their vector
/// or scalar values as needed.
class VPWidenPointerInductionRecipe : public VPHeaderPHIRecipe {
  /// Descriptor with start, step and kind of the pointer induction.
  const InductionDescriptor &IndDesc;

  /// SCEV used to expand step.
  /// FIXME: move expansion of step to the pre-header, once it is modeled
  /// explicitly.
  ScalarEvolution &SE;

public:
  /// Create a new VPWidenPointerInductionRecipe for \p Phi with start value \p
  /// Start. Note that \p Start is added unconditionally here, unlike the base
  /// class which treats it as optional.
  VPWidenPointerInductionRecipe(PHINode *Phi, VPValue *Start,
                                const InductionDescriptor &IndDesc,
                                ScalarEvolution &SE)
      : VPHeaderPHIRecipe(VPVWidenPointerInductionSC, VPWidenPointerInductionSC,
                          Phi),
        IndDesc(IndDesc), SE(SE) {
    addOperand(Start);
  }

  ~VPWidenPointerInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPWidenPointerInductionSC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPWidenPointerInductionSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPointerInductionSC;
  }

  /// Generate vector values for the pointer induction.
  void execute(VPTransformState &State) override;

  /// Returns true if only scalar values will be generated.
  bool onlyScalarsGenerated(ElementCount VF);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1227 
1228 /// A recipe for handling header phis that are widened in the vector loop.
1229 /// In the VPlan native path, all incoming VPValues & VPBasicBlock pairs are
1230 /// managed in the recipe directly.
1231 class VPWidenPHIRecipe : public VPHeaderPHIRecipe {
1232   /// List of incoming blocks. Only used in the VPlan native path.
1233   SmallVector<VPBasicBlock *, 2> IncomingBlocks;
1234 
1235 public:
1236   /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
1237   VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr)
1238       : VPHeaderPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {
1239     if (Start)
1240       addOperand(Start);
1241   }
1242 
1243   ~VPWidenPHIRecipe() override = default;
1244 
1245   /// Method to support type inquiry through isa, cast, and dyn_cast.
1246   static inline bool classof(const VPRecipeBase *B) {
1247     return B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
1248   }
1249   static inline bool classof(const VPHeaderPHIRecipe *R) {
1250     return R->getVPDefID() == VPRecipeBase::VPWidenPHISC;
1251   }
1252   static inline bool classof(const VPValue *V) {
1253     return V->getVPValueID() == VPValue::VPVWidenPHISC;
1254   }
1255 
1256   /// Generate the phi/select nodes.
1257   void execute(VPTransformState &State) override;
1258 
1259 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1260   /// Print the recipe.
1261   void print(raw_ostream &O, const Twine &Indent,
1262              VPSlotTracker &SlotTracker) const override;
1263 #endif
1264 
1265   /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
1266   void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
1267     addOperand(IncomingV);
1268     IncomingBlocks.push_back(IncomingBlock);
1269   }
1270 
1271   /// Returns the \p I th incoming VPBasicBlock.
1272   VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
1273 
1274   /// Returns the \p I th incoming VPValue.
1275   VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
1276 };
1277 
/// A recipe for handling first-order recurrence phis. The start value is the
/// first operand of the recipe and the incoming value from the backedge is the
/// second operand.
struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe {
  /// Create a recurrence-phi recipe for \p Phi with start value \p Start.
  VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
      : VPHeaderPHIRecipe(VPVFirstOrderRecurrencePHISC,
                          VPFirstOrderRecurrencePHISC, Phi, &Start) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
  }

  /// Generate the phi nodes for the recurrence.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1305 
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
class VPReductionPHIRecipe : public VPHeaderPHIRecipe {
  /// Descriptor for the reduction.
  const RecurrenceDescriptor &RdxDesc;

  /// The phi is part of an in-loop reduction.
  bool IsInLoop;

  /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
  bool IsOrdered;

public:
  /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPReductionPHIRecipe(PHINode *Phi, const RecurrenceDescriptor &RdxDesc,
                       VPValue &Start, bool IsInLoop = false,
                       bool IsOrdered = false)
      : VPHeaderPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
        RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
    assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
  }

  ~VPReductionPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  const RecurrenceDescriptor &getRecurrenceDescriptor() const {
    return RdxDesc;
  }

  /// Returns true, if the phi is part of an ordered reduction.
  bool isOrdered() const { return IsOrdered; }

  /// Returns true, if the phi is part of an in-loop reduction.
  bool isInLoop() const { return IsInLoop; }
};
1362 
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  /// The original phi being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx; values sit at even operand
  /// positions per the [I0, M0, I1, M1, ...] layout.
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx; masks sit at odd operand positions.
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // Recursing through Blend recipes only, must terminate at header phi's the
    // latest.
    return all_of(users(),
                  [this](VPUser *U) { return U->onlyFirstLaneUsed(this); });
  }
};
1415 
1416 /// VPInterleaveRecipe is a recipe for transforming an interleave group of load
1417 /// or stores into one wide load/store and shuffles. The first operand of a
1418 /// VPInterleave recipe is the address, followed by the stored values, followed
1419 /// by an optional mask.
1420 class VPInterleaveRecipe : public VPRecipeBase {
1421   const InterleaveGroup<Instruction> *IG;
1422 
1423   bool HasMask = false;
1424 
1425 public:
1426   VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
1427                      ArrayRef<VPValue *> StoredValues, VPValue *Mask)
1428       : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
1429     for (unsigned i = 0; i < IG->getFactor(); ++i)
1430       if (Instruction *I = IG->getMember(i)) {
1431         if (I->getType()->isVoidTy())
1432           continue;
1433         new VPValue(I, this);
1434       }
1435 
1436     for (auto *SV : StoredValues)
1437       addOperand(SV);
1438     if (Mask) {
1439       HasMask = true;
1440       addOperand(Mask);
1441     }
1442   }
1443   ~VPInterleaveRecipe() override = default;
1444 
1445   /// Method to support type inquiry through isa, cast, and dyn_cast.
1446   static inline bool classof(const VPDef *D) {
1447     return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
1448   }
1449 
1450   /// Return the address accessed by this recipe.
1451   VPValue *getAddr() const {
1452     return getOperand(0); // Address is the 1st, mandatory operand.
1453   }
1454 
1455   /// Return the mask used by this recipe. Note that a full mask is represented
1456   /// by a nullptr.
1457   VPValue *getMask() const {
1458     // Mask is optional and therefore the last, currently 2nd operand.
1459     return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
1460   }
1461 
1462   /// Return the VPValues stored by this interleave group. If it is a load
1463   /// interleave group, return an empty ArrayRef.
1464   ArrayRef<VPValue *> getStoredValues() const {
1465     // The first operand is the address, followed by the stored values, followed
1466     // by an optional mask.
1467     return ArrayRef<VPValue *>(op_begin(), getNumOperands())
1468         .slice(1, getNumStoreOperands());
1469   }
1470 
1471   /// Generate the wide load or store, and shuffles.
1472   void execute(VPTransformState &State) override;
1473 
1474 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1475   /// Print the recipe.
1476   void print(raw_ostream &O, const Twine &Indent,
1477              VPSlotTracker &SlotTracker) const override;
1478 #endif
1479 
1480   const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
1481 
1482   /// Returns the number of stored operands of this interleave group. Returns 0
1483   /// for load interleave groups.
1484   unsigned getNumStoreOperands() const {
1485     return getNumOperands() - (HasMask ? 2 : 1);
1486   }
1487 
1488   /// The recipe only uses the first lane of the address.
1489   bool onlyFirstLaneUsed(const VPValue *Op) const override {
1490     assert(is_contained(operands(), Op) &&
1491            "Op must be an operand of the recipe");
1492     return Op == getAddr() && all_of(getStoredValues(), [Op](VPValue *StoredV) {
1493              return Op != StoredV;
1494            });
1495   }
1496 };
1497 
/// A recipe to represent inloop reduction operations, performing a reduction on
/// a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  const RecurrenceDescriptor *RdxDesc;
  /// Pointer to the TTI, needed to create the target reduction
  const TargetTransformInfo *TTI;

public:
  /// Create an in-loop reduction recipe for \p I; \p CondOp, when non-null,
  /// is appended as the optional third operand.
  VPReductionRecipe(const RecurrenceDescriptor *R, Instruction *I,
                    VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  /// Generate the reduction in the loop
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};
1542 
/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
class VPReplicateRecipe : public VPRecipeBase, public VPValue {
  /// Indicator if only a single replica per lane is needed.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  template <typename IterT>
  VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
                    bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
        IsUniform(IsUniform), IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
  }

  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  /// Set whether the scalar values should also be packed into a vector.
  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if only a single replica, per lane zero, is generated.
  bool isUniform() const { return IsUniform; }

  /// Returns true if the scalar values are also packed into a vector.
  bool isPacked() const { return AlsoPack; }

  /// Returns true if the replicas are predicated.
  bool isPredicated() const { return IsPredicated; }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // A uniform recipe only generates the copy for lane zero, so it only
    // demands the first lane of all its operands.
    return isUniform();
  }

  /// Returns true if the recipe uses scalars of operand \p Op.
  bool usesScalars(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return true;
  }
};
1615 
1616 /// A recipe for generating conditional branches on the bits of a mask.
1617 class VPBranchOnMaskRecipe : public VPRecipeBase {
1618 public:
1619   VPBranchOnMaskRecipe(VPValue *BlockInMask)
1620       : VPRecipeBase(VPBranchOnMaskSC, {}) {
1621     if (BlockInMask) // nullptr means all-one mask.
1622       addOperand(BlockInMask);
1623   }
1624 
1625   /// Method to support type inquiry through isa, cast, and dyn_cast.
1626   static inline bool classof(const VPDef *D) {
1627     return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
1628   }
1629 
1630   /// Generate the extraction of the appropriate bit from the block mask and the
1631   /// conditional branch.
1632   void execute(VPTransformState &State) override;
1633 
1634 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1635   /// Print the recipe.
1636   void print(raw_ostream &O, const Twine &Indent,
1637              VPSlotTracker &SlotTracker) const override {
1638     O << Indent << "BRANCH-ON-MASK ";
1639     if (VPValue *Mask = getMask())
1640       Mask->printAsOperand(O, SlotTracker);
1641     else
1642       O << " All-One";
1643   }
1644 #endif
1645 
1646   /// Return the mask used by this recipe. Note that a full mask is represented
1647   /// by a nullptr.
1648   VPValue *getMask() const {
1649     assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
1650     // Mask is optional.
1651     return getNumOperands() == 1 ? getOperand(0) : nullptr;
1652   }
1653 
1654   /// Returns true if the recipe uses scalars of operand \p Op.
1655   bool usesScalars(const VPValue *Op) const override {
1656     assert(is_contained(operands(), Op) &&
1657            "Op must be an operand of the recipe");
1658     return true;
1659   }
1660 };
1661 
/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a VPPredInstPHIRecipe given \p PredV whose value needs a phi
  /// node after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC, PredV),
        VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe uses scalars of operand \p Op.
  bool usesScalars(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return true;
  }
};
1697 
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
  /// The underlying load or store instruction being widened.
  Instruction &Ingredient;

  // Whether the loaded-from / stored-to addresses are consecutive.
  bool Consecutive;

  // Whether the consecutive loaded/stored addresses are in reverse order.
  bool Reverse;

  /// Add \p Mask as the last operand, if it is non-null. A full (all-one)
  /// mask is represented by not adding a mask operand at all.
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// Returns true if a mask operand is present, i.e. there is one operand
  /// beyond the mandatory ones (address and, for stores, stored value).
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}), Ingredient(Load),
        Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    // The loaded result is modeled by a VPValue defined by this recipe.
    new VPValue(VPValue::VPVMemoryInstructionSC, &Load, this);
    setMask(Mask);
  }

  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  // Return whether the loaded-from / stored-to addresses are consecutive.
  bool isConsecutive() const { return Consecutive; }

  // Return whether the consecutive loaded/stored addresses are in reverse
  // order.
  bool isReverse() const { return Reverse; }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");

    // Widened, consecutive memory operations only demand the first lane of
    // their address, unless the same operand is also stored. That latter can
    // happen with opaque pointers.
    return Op == getAddr() && isConsecutive() &&
           (!isStore() || Op != getStoredValue());
  }

  /// Return the underlying load or store instruction being widened.
  Instruction &getIngredient() const { return Ingredient; }
};
1798 
/// Recipe to expand a SCEV expression.
class VPExpandSCEVRecipe : public VPRecipeBase, public VPValue {
  /// The SCEV expression to expand.
  const SCEV *Expr;
  /// ScalarEvolution used to expand the expression.
  ScalarEvolution &SE;

public:
  VPExpandSCEVRecipe(const SCEV *Expr, ScalarEvolution &SE)
      : VPRecipeBase(VPExpandSCEVSC, {}), VPValue(nullptr, this), Expr(Expr),
        SE(SE) {}

  ~VPExpandSCEVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPExpandSCEVSC;
  }

  /// Expand the wrapped SCEV expression, producing this recipe's value.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Return the SCEV expression this recipe expands.
  const SCEV *getSCEV() const { return Expr; }
};
1827 
/// Canonical scalar induction phi of the vector loop. Starting at the specified
/// start value (either 0 or the resume value when vectorizing the epilogue
/// loop). VPWidenCanonicalIVRecipe represents the vector version of the
/// canonical induction variable.
class VPCanonicalIVPHIRecipe : public VPHeaderPHIRecipe {
  /// Debug location to use for the generated induction phi.
  DebugLoc DL;

public:
  VPCanonicalIVPHIRecipe(VPValue *StartV, DebugLoc DL)
      : VPHeaderPHIRecipe(VPValue::VPVCanonicalIVPHISC, VPCanonicalIVPHISC,
                          nullptr, StartV),
        DL(DL) {}

  ~VPCanonicalIVPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPCanonicalIVPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *D) {
    return D->getVPDefID() == VPCanonicalIVPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC;
  }

  /// Generate the canonical scalar induction phi of the vector loop.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the scalar type of the induction, i.e. the type of its start
  /// value (operand 0).
  const Type *getScalarType() const {
    return getOperand(0)->getLiveInIRValue()->getType();
  }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // The recipe models a scalar phi, so only lane 0 of the start value is
    // demanded.
    return true;
  }
};
1875 
/// A recipe for generating the active lane mask for the vector loop that is
/// used to predicate the vector operations.
/// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
/// remove VPActiveLaneMaskPHIRecipe.
class VPActiveLaneMaskPHIRecipe : public VPHeaderPHIRecipe {
  /// Debug location to use for the generated mask phi.
  DebugLoc DL;

public:
  VPActiveLaneMaskPHIRecipe(VPValue *StartMask, DebugLoc DL)
      : VPHeaderPHIRecipe(VPValue::VPVActiveLaneMaskPHISC,
                          VPActiveLaneMaskPHISC, nullptr, StartMask),
        DL(DL) {}

  ~VPActiveLaneMaskPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPActiveLaneMaskPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *D) {
    return D->getVPDefID() == VPActiveLaneMaskPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVActiveLaneMaskPHISC;
  }

  /// Generate the active lane mask phi of the vector loop.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1911 
1912 /// A Recipe for widening the canonical induction variable of the vector loop.
1913 class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
1914 public:
1915   VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe *CanonicalIV)
1916       : VPRecipeBase(VPWidenCanonicalIVSC, {CanonicalIV}),
1917         VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}
1918 
1919   ~VPWidenCanonicalIVRecipe() override = default;
1920 
1921   /// Method to support type inquiry through isa, cast, and dyn_cast.
1922   static inline bool classof(const VPDef *D) {
1923     return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1924   }
1925 
1926   /// Extra classof implementations to allow directly casting from VPUser ->
1927   /// VPWidenCanonicalIVRecipe.
1928   static inline bool classof(const VPUser *U) {
1929     auto *R = dyn_cast<VPRecipeBase>(U);
1930     return R && R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1931   }
1932   static inline bool classof(const VPRecipeBase *R) {
1933     return R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1934   }
1935 
1936   /// Generate a canonical vector induction variable of the vector loop, with
1937   /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
1938   /// step = <VF*UF, VF*UF, ..., VF*UF>.
1939   void execute(VPTransformState &State) override;
1940 
1941 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1942   /// Print the recipe.
1943   void print(raw_ostream &O, const Twine &Indent,
1944              VPSlotTracker &SlotTracker) const override;
1945 #endif
1946 
1947   /// Returns the scalar type of the induction.
1948   const Type *getScalarType() const {
1949     return cast<VPCanonicalIVPHIRecipe>(getOperand(0)->getDef())
1950         ->getScalarType();
1951   }
1952 };
1953 
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their scalar values.
/// The operands are {CanonicalIV, Start, Step}.
class VPScalarIVStepsRecipe : public VPRecipeBase, public VPValue {
  /// Scalar type to use for the generated values.
  Type *Ty;
  /// If not nullptr, truncate the generated values to TruncToTy.
  Type *TruncToTy;
  /// Descriptor of the induction whose scalar steps are generated.
  const InductionDescriptor &IndDesc;

public:
  VPScalarIVStepsRecipe(Type *Ty, const InductionDescriptor &IndDesc,
                        VPValue *CanonicalIV, VPValue *Start, VPValue *Step,
                        Type *TruncToTy)
      : VPRecipeBase(VPScalarIVStepsSC, {CanonicalIV, Start, Step}),
        VPValue(nullptr, this), Ty(Ty), TruncToTy(TruncToTy), IndDesc(IndDesc) {
  }

  ~VPScalarIVStepsRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
  }
  /// Extra classof implementations to allow directly casting from VPUser ->
  /// VPScalarIVStepsRecipe.
  static inline bool classof(const VPUser *U) {
    auto *R = dyn_cast<VPRecipeBase>(U);
    return R && R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
  }
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
  }

  /// Generate the scalarized versions of the phi node as needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the induction is canonical, i.e. starting at 0 and
  /// incremented by UF * VF (= the original IV is incremented by 1).
  bool isCanonical() const;

  /// Returns the canonical IV recipe this recipe is based on (operand 0).
  VPCanonicalIVPHIRecipe *getCanonicalIV() const;
  /// Returns the start value of the induction (operand 1).
  VPValue *getStartValue() const { return getOperand(1); }
  /// Returns the step value of the induction (operand 2).
  VPValue *getStepValue() const { return getOperand(2); }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return true;
  }
};
2011 
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipe's each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI recipes.
class VPBasicBlock : public VPBlockBase {
public:
  using RecipeListTy = iplist<VPRecipeBase>;

private:
  /// The VPRecipes held in the order of output instructions to generate.
  RecipeListTy Recipes;

public:
  VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
      : VPBlockBase(VPBasicBlockSC, Name.str()) {
    if (Recipe)
      appendRecipe(Recipe);
  }

  ~VPBasicBlock() override {
    // Drop the recipes one by one until the list is empty.
    while (!Recipes.empty())
      Recipes.pop_back();
  }

  /// Recipe iterator types.
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  /// Insert \p Recipe before \p InsertPt, setting this block as its parent.
  /// \p Recipe must not already belong to a block.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

  /// Returns the closest enclosing non-replicator loop region, if any.
  VPRegionBlock *getEnclosingLoopRegion();

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif

  /// If the block has multiple successors, return the branch recipe terminating
  /// the block. If there are no or only a single successor, return nullptr.
  VPRecipeBase *getTerminator();
  const VPRecipeBase *getTerminator() const;

  /// Returns true if the block is exiting its parent region.
  bool isExiting() const;

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};
2130 
/// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
/// which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
/// A VPRegionBlock may indicate that its contents are to be replicated several
/// times. This is designed to support predicated scalarization, in which a
/// scalar if-then code structure needs to be generated VF * UF times. Having
/// this replication indicator helps to keep a single model for multiple
/// candidate VF's. The actual replication takes place only once the desired VF
/// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exiting block of the SESE region modelled by the
  /// VPRegionBlock.
  VPBlockBase *Exiting;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exiting,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exiting(Exiting),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exiting->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exiting->setParent(this);
  }
  /// Construct a region whose entry and exiting blocks are set later.
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exiting(nullptr),
        IsReplicator(IsReplicator) {}

  ~VPRegionBlock() override {
    if (Entry) {
      // Replace all references held by the region's blocks with a dummy value
      // before deleting the region's CFG.
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExiting() const { return Exiting; }
  VPBlockBase *getExiting() { return Exiting; }

  /// Set \p ExitingBlock as the exiting VPBlockBase of this VPRegionBlock. \p
  /// ExitingBlock must have no successors.
  void setExiting(VPBlockBase *ExitingBlock) {
    assert(ExitingBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exiting = ExitingBlock;
    ExitingBlock->setParent(this);
  }

  /// Returns the pre-header VPBasicBlock of the loop region.
  VPBasicBlock *getPreheaderVPBB() {
    assert(!isReplicator() && "should only get pre-header of loop regions");
    return getSinglePredecessor()->getExitingBasicBlock();
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif
};
2236 
2237 //===----------------------------------------------------------------------===//
2238 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs     //
2239 //===----------------------------------------------------------------------===//
2240 
2241 // The following set of template specializations implement GraphTraits to treat
2242 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
2243 // that VPBlockBase traits don't recurse into VPRegioBlocks, i.e., if the
2244 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
2245 // successors/predecessors but not to the blocks inside the region.
2246 
2247 template <> struct GraphTraits<VPBlockBase *> {
2248   using NodeRef = VPBlockBase *;
2249   using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
2250 
2251   static NodeRef getEntryNode(NodeRef N) { return N; }
2252 
2253   static inline ChildIteratorType child_begin(NodeRef N) {
2254     return N->getSuccessors().begin();
2255   }
2256 
2257   static inline ChildIteratorType child_end(NodeRef N) {
2258     return N->getSuccessors().end();
2259   }
2260 };
2261 
2262 template <> struct GraphTraits<const VPBlockBase *> {
2263   using NodeRef = const VPBlockBase *;
2264   using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;
2265 
2266   static NodeRef getEntryNode(NodeRef N) { return N; }
2267 
2268   static inline ChildIteratorType child_begin(NodeRef N) {
2269     return N->getSuccessors().begin();
2270   }
2271 
2272   static inline ChildIteratorType child_end(NodeRef N) {
2273     return N->getSuccessors().end();
2274   }
2275 };
2276 
2277 // Inverse order specialization for VPBasicBlocks. Predecessors are used instead
2278 // of successors for the inverse traversal.
2279 template <> struct GraphTraits<Inverse<VPBlockBase *>> {
2280   using NodeRef = VPBlockBase *;
2281   using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
2282 
2283   static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }
2284 
2285   static inline ChildIteratorType child_begin(NodeRef N) {
2286     return N->getPredecessors().begin();
2287   }
2288 
2289   static inline ChildIteratorType child_end(NodeRef N) {
2290     return N->getPredecessors().end();
2291   }
2292 };
2293 
// The following set of template specializations implement GraphTraits to
// treat VPRegionBlock as a graph and recurse inside its nodes. It's important
// to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
// (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
// there won't be automatic recursion into other VPBlockBases that turn out to
// be VPRegionBlocks.
2300 
2301 template <>
2302 struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
2303   using GraphRef = VPRegionBlock *;
2304   using nodes_iterator = df_iterator<NodeRef>;
2305 
2306   static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }
2307 
2308   static nodes_iterator nodes_begin(GraphRef N) {
2309     return nodes_iterator::begin(N->getEntry());
2310   }
2311 
2312   static nodes_iterator nodes_end(GraphRef N) {
2313     // df_iterator::end() returns an empty iterator so the node used doesn't
2314     // matter.
2315     return nodes_iterator::end(N);
2316   }
2317 };
2318 
2319 template <>
2320 struct GraphTraits<const VPRegionBlock *>
2321     : public GraphTraits<const VPBlockBase *> {
2322   using GraphRef = const VPRegionBlock *;
2323   using nodes_iterator = df_iterator<NodeRef>;
2324 
2325   static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }
2326 
2327   static nodes_iterator nodes_begin(GraphRef N) {
2328     return nodes_iterator::begin(N->getEntry());
2329   }
2330 
2331   static nodes_iterator nodes_end(GraphRef N) {
2332     // df_iterator::end() returns an empty iterator so the node used doesn't
2333     // matter.
2334     return nodes_iterator::end(N);
2335   }
2336 };
2337 
2338 template <>
2339 struct GraphTraits<Inverse<VPRegionBlock *>>
2340     : public GraphTraits<Inverse<VPBlockBase *>> {
2341   using GraphRef = VPRegionBlock *;
2342   using nodes_iterator = df_iterator<NodeRef>;
2343 
2344   static NodeRef getEntryNode(Inverse<GraphRef> N) {
2345     return N.Graph->getExiting();
2346   }
2347 
2348   static nodes_iterator nodes_begin(GraphRef N) {
2349     return nodes_iterator::begin(N->getExiting());
2350   }
2351 
2352   static nodes_iterator nodes_end(GraphRef N) {
2353     // df_iterator::end() returns an empty iterator so the node used doesn't
2354     // matter.
2355     return nodes_iterator::end(N);
2356   }
2357 };
2358 
2359 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
2360 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
2361 /// parent region's successors. This ensures all blocks in a region are visited
2362 /// before any blocks in a successor region when doing a reverse post-order
2363 // traversal of the graph.
2364 template <typename BlockPtrTy>
2365 class VPAllSuccessorsIterator
2366     : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
2367                                   std::forward_iterator_tag, VPBlockBase> {
2368   BlockPtrTy Block;
2369   /// Index of the current successor. For VPBasicBlock nodes, this simply is the
2370   /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
2371   /// used for the region's entry block, and SuccessorIdx - 1 are the indices
2372   /// for the successor array.
2373   size_t SuccessorIdx;
2374 
2375   static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
2376     while (Current && Current->getNumSuccessors() == 0)
2377       Current = Current->getParent();
2378     return Current;
2379   }
2380 
2381   /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
2382   /// both the const and non-const operator* implementations.
2383   template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
2384     if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
2385       if (SuccIdx == 0)
2386         return R->getEntry();
2387       SuccIdx--;
2388     }
2389 
2390     // For exit blocks, use the next parent region with successors.
2391     return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
2392   }
2393 
2394 public:
2395   VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
2396       : Block(Block), SuccessorIdx(Idx) {}
2397   VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
2398       : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
2399 
2400   VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
2401     Block = R.Block;
2402     SuccessorIdx = R.SuccessorIdx;
2403     return *this;
2404   }
2405 
2406   static VPAllSuccessorsIterator end(BlockPtrTy Block) {
2407     BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
2408     unsigned NumSuccessors = ParentWithSuccs
2409                                  ? ParentWithSuccs->getNumSuccessors()
2410                                  : Block->getNumSuccessors();
2411 
2412     if (auto *R = dyn_cast<VPRegionBlock>(Block))
2413       return {R, NumSuccessors + 1};
2414     return {Block, NumSuccessors};
2415   }
2416 
2417   bool operator==(const VPAllSuccessorsIterator &R) const {
2418     return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
2419   }
2420 
2421   const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
2422 
2423   BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
2424 
2425   VPAllSuccessorsIterator &operator++() {
2426     SuccessorIdx++;
2427     return *this;
2428   }
2429 
2430   VPAllSuccessorsIterator operator++(int X) {
2431     VPAllSuccessorsIterator Orig = *this;
2432     SuccessorIdx++;
2433     return Orig;
2434   }
2435 };
2436 
/// Thin wrapper marking a VPBlockBase graph for the GraphTraits
/// specializations that traverse through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  /// Entry block of the graph to be traversed.
  BlockTy EntryBlock;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy Entry) : EntryBlock(Entry) {}
  BlockTy getEntry() { return EntryBlock; }
};
2445 
2446 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
2447 /// including traversing through VPRegionBlocks.  Exit blocks of a region
2448 /// implicitly have their parent region's successors. This ensures all blocks in
2449 /// a region are visited before any blocks in a successor region when doing a
2450 /// reverse post-order traversal of the graph.
2451 template <>
2452 struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
2453   using NodeRef = VPBlockBase *;
2454   using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;
2455 
2456   static NodeRef
2457   getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
2458     return N.getEntry();
2459   }
2460 
2461   static inline ChildIteratorType child_begin(NodeRef N) {
2462     return ChildIteratorType(N);
2463   }
2464 
2465   static inline ChildIteratorType child_end(NodeRef N) {
2466     return ChildIteratorType::end(N);
2467   }
2468 };
2469 
2470 template <>
2471 struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
2472   using NodeRef = const VPBlockBase *;
2473   using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;
2474 
2475   static NodeRef
2476   getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
2477     return N.getEntry();
2478   }
2479 
2480   static inline ChildIteratorType child_begin(NodeRef N) {
2481     return ChildIteratorType(N);
2482   }
2483 
2484   static inline ChildIteratorType child_end(NodeRef N) {
2485     return ChildIteratorType::end(N);
2486   }
2487 };
2488 
2489 /// VPlan models a candidate for vectorization, encoding various decisions take
2490 /// to produce efficient output IR, including which branches, basic-blocks and
2491 /// output IR instructions to generate, and their cost. VPlan holds a
2492 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2493 /// VPBlock.
2494 class VPlan {
2495   friend class VPlanPrinter;
2496   friend class VPSlotTracker;
2497 
2498   /// Hold the single entry to the Hierarchical CFG of the VPlan.
2499   VPBlockBase *Entry;
2500 
2501   /// Holds the VFs applicable to this VPlan.
2502   SmallSetVector<ElementCount, 2> VFs;
2503 
2504   /// Holds the name of the VPlan, for printing.
2505   std::string Name;
2506 
2507   /// Holds all the external definitions created for this VPlan. External
2508   /// definitions must be immutable and hold a pointer to their underlying IR.
2509   DenseMap<Value *, VPValue *> VPExternalDefs;
2510 
2511   /// Represents the trip count of the original loop, for folding
2512   /// the tail.
2513   VPValue *TripCount = nullptr;
2514 
2515   /// Represents the backedge taken count of the original loop, for folding
2516   /// the tail. It equals TripCount - 1.
2517   VPValue *BackedgeTakenCount = nullptr;
2518 
2519   /// Represents the vector trip count.
2520   VPValue VectorTripCount;
2521 
2522   /// Holds a mapping between Values and their corresponding VPValue inside
2523   /// VPlan.
2524   Value2VPValueTy Value2VPValue;
2525 
2526   /// Contains all VPValues that been allocated by addVPValue directly and need
2527   /// to be free when the plan's destructor is called.
2528   SmallVector<VPValue *, 16> VPValuesToFree;
2529 
2530   /// Indicates whether it is safe use the Value2VPValue mapping or if the
2531   /// mapping cannot be used any longer, because it is stale.
2532   bool Value2VPValueEnabled = true;
2533 
2534   /// Values used outside the plan.
2535   MapVector<PHINode *, VPLiveOut *> LiveOuts;
2536 
2537 public:
2538   VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2539     if (Entry)
2540       Entry->setPlan(this);
2541   }
2542 
2543   ~VPlan() {
2544     clearLiveOuts();
2545 
2546     if (Entry) {
2547       VPValue DummyValue;
2548       for (VPBlockBase *Block : depth_first(Entry))
2549         Block->dropAllReferences(&DummyValue);
2550 
2551       VPBlockBase::deleteCFG(Entry);
2552     }
2553     for (VPValue *VPV : VPValuesToFree)
2554       delete VPV;
2555     if (TripCount)
2556       delete TripCount;
2557     if (BackedgeTakenCount)
2558       delete BackedgeTakenCount;
2559     for (auto &P : VPExternalDefs)
2560       delete P.second;
2561   }
2562 
2563   /// Prepare the plan for execution, setting up the required live-in values.
2564   void prepareToExecute(Value *TripCount, Value *VectorTripCount,
2565                         Value *CanonicalIVStartValue, VPTransformState &State,
2566                         bool IsEpilogueVectorization);
2567 
2568   /// Generate the IR code for this VPlan.
2569   void execute(struct VPTransformState *State);
2570 
2571   VPBlockBase *getEntry() { return Entry; }
2572   const VPBlockBase *getEntry() const { return Entry; }
2573 
2574   VPBlockBase *setEntry(VPBlockBase *Block) {
2575     Entry = Block;
2576     Block->setPlan(this);
2577     return Entry;
2578   }
2579 
2580   /// The trip count of the original loop.
2581   VPValue *getOrCreateTripCount() {
2582     if (!TripCount)
2583       TripCount = new VPValue();
2584     return TripCount;
2585   }
2586 
2587   /// The backedge taken count of the original loop.
2588   VPValue *getOrCreateBackedgeTakenCount() {
2589     if (!BackedgeTakenCount)
2590       BackedgeTakenCount = new VPValue();
2591     return BackedgeTakenCount;
2592   }
2593 
2594   /// The vector trip count.
2595   VPValue &getVectorTripCount() { return VectorTripCount; }
2596 
2597   /// Mark the plan to indicate that using Value2VPValue is not safe any
2598   /// longer, because it may be stale.
2599   void disableValue2VPValue() { Value2VPValueEnabled = false; }
2600 
2601   void addVF(ElementCount VF) { VFs.insert(VF); }
2602 
2603   bool hasVF(ElementCount VF) { return VFs.count(VF); }
2604 
2605   const std::string &getName() const { return Name; }
2606 
2607   void setName(const Twine &newName) { Name = newName.str(); }
2608 
2609   /// Get the existing or add a new external definition for \p V.
2610   VPValue *getOrAddExternalDef(Value *V) {
2611     auto I = VPExternalDefs.insert({V, nullptr});
2612     if (I.second)
2613       I.first->second = new VPValue(V);
2614     return I.first->second;
2615   }
2616 
2617   void addVPValue(Value *V) {
2618     assert(Value2VPValueEnabled &&
2619            "IR value to VPValue mapping may be out of date!");
2620     assert(V && "Trying to add a null Value to VPlan");
2621     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2622     VPValue *VPV = new VPValue(V);
2623     Value2VPValue[V] = VPV;
2624     VPValuesToFree.push_back(VPV);
2625   }
2626 
2627   void addVPValue(Value *V, VPValue *VPV) {
2628     assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
2629     assert(V && "Trying to add a null Value to VPlan");
2630     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2631     Value2VPValue[V] = VPV;
2632   }
2633 
2634   /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
2635   /// checking whether it is safe to query VPValues using IR Values.
2636   VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
2637     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2638            "Value2VPValue mapping may be out of date!");
2639     assert(V && "Trying to get the VPValue of a null Value");
2640     assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2641     return Value2VPValue[V];
2642   }
2643 
2644   /// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
2645   /// OverrideAllowed can be used to disable checking whether it is safe to
2646   /// query VPValues using IR Values.
2647   VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
2648     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2649            "Value2VPValue mapping may be out of date!");
2650     assert(V && "Trying to get or add the VPValue of a null Value");
2651     if (!Value2VPValue.count(V))
2652       addVPValue(V);
2653     return getVPValue(V);
2654   }
2655 
2656   void removeVPValueFor(Value *V) {
2657     assert(Value2VPValueEnabled &&
2658            "IR value to VPValue mapping may be out of date!");
2659     Value2VPValue.erase(V);
2660   }
2661 
2662 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2663   /// Print this VPlan to \p O.
2664   void print(raw_ostream &O) const;
2665 
2666   /// Print this VPlan in DOT format to \p O.
2667   void printDOT(raw_ostream &O) const;
2668 
2669   /// Dump the plan to stderr (for debugging).
2670   LLVM_DUMP_METHOD void dump() const;
2671 #endif
2672 
2673   /// Returns a range mapping the values the range \p Operands to their
2674   /// corresponding VPValues.
2675   iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2676   mapToVPValues(User::op_range Operands) {
2677     std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2678       return getOrAddVPValue(Op);
2679     };
2680     return map_range(Operands, Fn);
2681   }
2682 
2683   /// Returns true if \p VPV is uniform after vectorization.
2684   bool isUniformAfterVectorization(VPValue *VPV) const {
2685     auto RepR = dyn_cast_or_null<VPReplicateRecipe>(VPV->getDef());
2686     return !VPV->getDef() || (RepR && RepR->isUniform());
2687   }
2688 
2689   /// Returns the VPRegionBlock of the vector loop.
2690   VPRegionBlock *getVectorLoopRegion() {
2691     return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
2692   }
2693   const VPRegionBlock *getVectorLoopRegion() const {
2694     return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
2695   }
2696 
2697   /// Returns the canonical induction recipe of the vector loop.
2698   VPCanonicalIVPHIRecipe *getCanonicalIV() {
2699     VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
2700     if (EntryVPBB->empty()) {
2701       // VPlan native path.
2702       EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
2703     }
2704     return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
2705   }
2706 
2707   /// Find and return the VPActiveLaneMaskPHIRecipe from the header - there
2708   /// be only one at most. If there isn't one, then return nullptr.
2709   VPActiveLaneMaskPHIRecipe *getActiveLaneMaskPhi();
2710 
2711   void addLiveOut(PHINode *PN, VPValue *V);
2712 
2713   void clearLiveOuts() {
2714     for (auto &KV : LiveOuts)
2715       delete KV.second;
2716     LiveOuts.clear();
2717   }
2718 
2719   void removeLiveOut(PHINode *PN) {
2720     delete LiveOuts[PN];
2721     LiveOuts.erase(PN);
2722   }
2723 
2724   const MapVector<PHINode *, VPLiveOut *> &getLiveOuts() const {
2725     return LiveOuts;
2726   }
2727 
2728 private:
2729   /// Add to the given dominator tree the header block and every new basic block
2730   /// that was created between it and the latch block, inclusive.
2731   static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2732                                   BasicBlock *LoopPreHeaderBB,
2733                                   BasicBlock *LoopExitBB);
2734 };
2735 
2736 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2737 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2738 /// indented and follows the dot format.
2739 class VPlanPrinter {
2740   raw_ostream &OS;
2741   const VPlan &Plan;
2742   unsigned Depth = 0;
2743   unsigned TabWidth = 2;
2744   std::string Indent;
2745   unsigned BID = 0;
2746   SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2747 
2748   VPSlotTracker SlotTracker;
2749 
2750   /// Handle indentation.
2751   void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2752 
2753   /// Print a given \p Block of the Plan.
2754   void dumpBlock(const VPBlockBase *Block);
2755 
2756   /// Print the information related to the CFG edges going out of a given
2757   /// \p Block, followed by printing the successor blocks themselves.
2758   void dumpEdges(const VPBlockBase *Block);
2759 
2760   /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2761   /// its successor blocks.
2762   void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2763 
2764   /// Print a given \p Region of the Plan.
2765   void dumpRegion(const VPRegionBlock *Region);
2766 
2767   unsigned getOrCreateBID(const VPBlockBase *Block) {
2768     return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2769   }
2770 
2771   Twine getOrCreateName(const VPBlockBase *Block);
2772 
2773   Twine getUID(const VPBlockBase *Block);
2774 
2775   /// Print the information related to a CFG edge between two VPBlockBases.
2776   void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2777                 const Twine &Label);
2778 
2779 public:
2780   VPlanPrinter(raw_ostream &O, const VPlan &P)
2781       : OS(O), Plan(P), SlotTracker(&P) {}
2782 
2783   LLVM_DUMP_METHOD void dump();
2784 };
2785 
/// Helper wrapper allowing an IR Value to be streamed to a raw_ostream via
/// operator<< (see below).
struct VPlanIngredient {
  /// The wrapped IR value; not owned.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  /// Print the wrapped value to \p O.
  void print(raw_ostream &O) const;
};
2793 
/// Stream a VPlanIngredient to \p OS by delegating to its print method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}
2798 
/// Stream a whole VPlan to \p OS by delegating to VPlan::print.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2803 #endif
2804 
2805 //===----------------------------------------------------------------------===//
2806 // VPlan Utilities
2807 //===----------------------------------------------------------------------===//
2808 
2809 /// Class that provides utilities for VPBlockBases in VPlan.
2810 class VPBlockUtils {
2811 public:
2812   VPBlockUtils() = delete;
2813 
2814   /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
2815   /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
2816   /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. \p BlockPtr's
2817   /// successors are moved from \p BlockPtr to \p NewBlock. \p NewBlock must
2818   /// have neither successors nor predecessors.
2819   static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
2820     assert(NewBlock->getSuccessors().empty() &&
2821            NewBlock->getPredecessors().empty() &&
2822            "Can't insert new block with predecessors or successors.");
2823     NewBlock->setParent(BlockPtr->getParent());
2824     SmallVector<VPBlockBase *> Succs(BlockPtr->successors());
2825     for (VPBlockBase *Succ : Succs) {
2826       disconnectBlocks(BlockPtr, Succ);
2827       connectBlocks(NewBlock, Succ);
2828     }
2829     connectBlocks(BlockPtr, NewBlock);
2830   }
2831 
2832   /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
2833   /// BlockPtr. Add \p IfTrue and \p IfFalse as succesors of \p BlockPtr and \p
2834   /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
2835   /// parent to \p IfTrue and \p IfFalse. \p BlockPtr must have no successors
2836   /// and \p IfTrue and \p IfFalse must have neither successors nor
2837   /// predecessors.
2838   static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
2839                                    VPBlockBase *BlockPtr) {
2840     assert(IfTrue->getSuccessors().empty() &&
2841            "Can't insert IfTrue with successors.");
2842     assert(IfFalse->getSuccessors().empty() &&
2843            "Can't insert IfFalse with successors.");
2844     BlockPtr->setTwoSuccessors(IfTrue, IfFalse);
2845     IfTrue->setPredecessors({BlockPtr});
2846     IfFalse->setPredecessors({BlockPtr});
2847     IfTrue->setParent(BlockPtr->getParent());
2848     IfFalse->setParent(BlockPtr->getParent());
2849   }
2850 
2851   /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
2852   /// the successors of \p From and \p From to the predecessors of \p To. Both
2853   /// VPBlockBases must have the same parent, which can be null. Both
2854   /// VPBlockBases can be already connected to other VPBlockBases.
2855   static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
2856     assert((From->getParent() == To->getParent()) &&
2857            "Can't connect two block with different parents");
2858     assert(From->getNumSuccessors() < 2 &&
2859            "Blocks can't have more than two successors.");
2860     From->appendSuccessor(To);
2861     To->appendPredecessor(From);
2862   }
2863 
2864   /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
2865   /// from the successors of \p From and \p From from the predecessors of \p To.
2866   static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
2867     assert(To && "Successor to disconnect is null.");
2868     From->removeSuccessor(To);
2869     To->removePredecessor(From);
2870   }
2871 
2872   /// Try to merge \p Block into its single predecessor, if \p Block is a
2873   /// VPBasicBlock and its predecessor has a single successor. Returns a pointer
2874   /// to the predecessor \p Block was merged into or nullptr otherwise.
2875   static VPBasicBlock *tryToMergeBlockIntoPredecessor(VPBlockBase *Block) {
2876     auto *VPBB = dyn_cast<VPBasicBlock>(Block);
2877     auto *PredVPBB =
2878         dyn_cast_or_null<VPBasicBlock>(Block->getSinglePredecessor());
2879     if (!VPBB || !PredVPBB || PredVPBB->getNumSuccessors() != 1)
2880       return nullptr;
2881 
2882     for (VPRecipeBase &R : make_early_inc_range(*VPBB))
2883       R.moveBefore(*PredVPBB, PredVPBB->end());
2884     VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
2885     auto *ParentRegion = cast<VPRegionBlock>(Block->getParent());
2886     if (ParentRegion->getExiting() == Block)
2887       ParentRegion->setExiting(PredVPBB);
2888     SmallVector<VPBlockBase *> Successors(Block->successors());
2889     for (auto *Succ : Successors) {
2890       VPBlockUtils::disconnectBlocks(Block, Succ);
2891       VPBlockUtils::connectBlocks(PredVPBB, Succ);
2892     }
2893     delete Block;
2894     return PredVPBB;
2895   }
2896 
2897   /// Return an iterator range over \p Range which only includes \p BlockTy
2898   /// blocks. The accesses are casted to \p BlockTy.
2899   template <typename BlockTy, typename T>
2900   static auto blocksOnly(const T &Range) {
2901     // Create BaseTy with correct const-ness based on BlockTy.
2902     using BaseTy =
2903         typename std::conditional<std::is_const<BlockTy>::value,
2904                                   const VPBlockBase, VPBlockBase>::type;
2905 
2906     // We need to first create an iterator range over (const) BlocktTy & instead
2907     // of (const) BlockTy * for filter_range to work properly.
2908     auto Mapped =
2909         map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
2910     auto Filter = make_filter_range(
2911         Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
2912     return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
2913       return cast<BlockTy>(&Block);
2914     });
2915   }
2916 };
2917 
/// Holds VPInstruction-based interleave groups for a VPlan, built from the
/// IR-level InterleavedAccessInfo passed to the constructor.
class VPInterleavedAccessInfo {
  /// Maps each VPInstruction to the interleave group it belongs to.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  ~VPInterleavedAccessInfo() {
    // Multiple map entries may share the same group; collect the unique
    // pointers into a set first.
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't have such a group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2956 
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
  /// Classification for bundle operands: combination failed, consecutive
  /// loads, or a common opcode.
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  /// Interleaved-access information consulted when choosing candidates
  /// (\see getBest).
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instruction can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
3051 
namespace vputils {
// Free-standing helper routines operating on VPlans and VPValues.

/// Returns true if only the first lane of \p Def is used.
bool onlyFirstLaneUsed(VPValue *Def);

/// Get or create a VPValue that corresponds to the expansion of \p Expr. If \p
/// Expr is a SCEVConstant or SCEVUnknown, return a VPValue wrapping the live-in
/// value. Otherwise return a VPExpandSCEVRecipe to expand \p Expr. If \p Plan's
/// pre-header already contains a recipe expanding \p Expr, return it. If not,
/// create a new one.
VPValue *getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
                                       ScalarEvolution &SE);
} // end namespace vputils
3065 
3066 } // end namespace llvm
3067 
3068 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
3069