xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h (revision 3e8eb5c7f4909209c042403ddee340b2ee7003a5)
1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 ///    VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 ///    treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 ///    within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 ///    instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24 
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27 
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/IRBuilder.h"
44 #include "llvm/Support/InstructionCost.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstddef>
48 #include <map>
49 #include <string>
50 
51 namespace llvm {
52 
53 class BasicBlock;
54 class DominatorTree;
55 class InductionDescriptor;
56 class InnerLoopVectorizer;
57 class LoopInfo;
58 class raw_ostream;
59 class RecurrenceDescriptor;
60 class Value;
61 class VPBasicBlock;
62 class VPRegionBlock;
63 class VPlan;
64 class VPReplicateRecipe;
65 class VPlanSlp;
66 
67 /// Returns a calculation for the total number of elements for a given \p VF.
68 /// For fixed width vectors this value is a constant, whereas for scalable
69 /// vectors it is an expression determined at runtime.
/// \p B supplies the IRBuilder used to emit any instructions the runtime
/// computation needs; \p Ty is the type of the returned value.
/// NOTE(review): presumably \p Ty is an integer type -- confirm against the
/// definition in VPlan.cpp.
70 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
71 
72 /// Return a value for Step multiplied by VF.
/// NOTE(review): for scalable VFs this presumably emits a multiply by vscale
/// via \p B at runtime -- confirm against the definition in VPlan.cpp.
73 Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF, int64_t Step);
74 
75 /// A range of powers-of-2 vectorization factors with fixed start and
76 /// adjustable end. The range includes start and excludes end, e.g.,:
77 /// [1, 9) = {1, 2, 4, 8}
78 struct VFRange {
79   // A power of 2.
80   const ElementCount Start;
81 
82   // Need not be a power of 2. If End <= Start range is empty.
83   ElementCount End;
84 
85   bool isEmpty() const {
86     return End.getKnownMinValue() <= Start.getKnownMinValue();
87   }
88 
89   VFRange(const ElementCount &Start, const ElementCount &End)
90       : Start(Start), End(End) {
91     assert(Start.isScalable() == End.isScalable() &&
92            "Both Start and End should have the same scalable flag");
93     assert(isPowerOf2_32(Start.getKnownMinValue()) &&
94            "Expected Start to be a power of 2");
95   }
96 };
97 
98 using VPlanPtr = std::unique_ptr<VPlan>;
99 
100 /// In what follows, the term "input IR" refers to code that is fed into the
101 /// vectorizer whereas the term "output IR" refers to code that is generated by
102 /// the vectorizer.
103 
104 /// VPLane provides a way to access lanes in both fixed width and scalable
105 /// vectors, where for the latter the lane index sometimes needs calculating
106 /// as a runtime expression.
107 class VPLane {
108 public:
109   /// Kind describes how to interpret Lane.
110   enum class Kind : uint8_t {
111     /// For First, Lane is the index into the first N elements of a
112     /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
113     First,
114     /// For ScalableLast, Lane is the offset from the start of the last
115     /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
116     /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
117     /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
118     ScalableLast
119   };
120 
121 private:
122   /// in [0..VF)
123   unsigned Lane;
124 
125   /// Indicates how the Lane should be interpreted, as described above.
126   Kind LaneKind;
127 
128 public:
129   VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
130 
131   static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
132 
133   static VPLane getLastLaneForVF(const ElementCount &VF) {
134     unsigned LaneOffset = VF.getKnownMinValue() - 1;
135     Kind LaneKind;
136     if (VF.isScalable())
137       // In this case 'LaneOffset' refers to the offset from the start of the
138       // last subvector with VF.getKnownMinValue() elements.
139       LaneKind = VPLane::Kind::ScalableLast;
140     else
141       LaneKind = VPLane::Kind::First;
142     return VPLane(LaneOffset, LaneKind);
143   }
144 
145   /// Returns a compile-time known value for the lane index and asserts if the
146   /// lane can only be calculated at runtime.
147   unsigned getKnownLane() const {
148     assert(LaneKind == Kind::First);
149     return Lane;
150   }
151 
152   /// Returns an expression describing the lane index that can be used at
153   /// runtime.
154   Value *getAsRuntimeExpr(IRBuilder<> &Builder, const ElementCount &VF) const;
155 
156   /// Returns the Kind of lane offset.
157   Kind getKind() const { return LaneKind; }
158 
159   /// Returns true if this is the first lane of the whole vector.
160   bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
161 
162   /// Maps the lane to a cache index based on \p VF.
163   unsigned mapToCacheIndex(const ElementCount &VF) const {
164     switch (LaneKind) {
165     case VPLane::Kind::ScalableLast:
166       assert(VF.isScalable() && Lane < VF.getKnownMinValue());
167       return VF.getKnownMinValue() + Lane;
168     default:
169       assert(Lane < VF.getKnownMinValue());
170       return Lane;
171     }
172   }
173 
174   /// Returns the maxmimum number of lanes that we are able to consider
175   /// caching for \p VF.
176   static unsigned getNumCachedLanes(const ElementCount &VF) {
177     return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
178   }
179 };
180 
181 /// VPIteration represents a single point in the iteration space of the output
182 /// (vectorized and/or unrolled) IR loop.
183 struct VPIteration {
184   /// in [0..UF)
185   unsigned Part;
186 
187   VPLane Lane;
188 
189   VPIteration(unsigned Part, unsigned Lane,
190               VPLane::Kind Kind = VPLane::Kind::First)
191       : Part(Part), Lane(Lane, Kind) {}
192 
193   VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
194 
195   bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
196 };
197 
198 /// VPTransformState holds information passed down when "executing" a VPlan,
199 /// needed for generating the output IR.
200 struct VPTransformState {
201   VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
202                    DominatorTree *DT, IRBuilder<> &Builder,
203                    InnerLoopVectorizer *ILV, VPlan *Plan)
204       : VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan) {
205   }
206 
207   /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
208   ElementCount VF;
209   unsigned UF;
210 
211   /// Hold the indices to generate specific scalar instructions. Null indicates
212   /// that all instances are to be generated, using either scalar or vector
213   /// instructions.
214   Optional<VPIteration> Instance;
215 
216   struct DataState {
217     /// A type for vectorized values in the new loop. Each value from the
218     /// original loop, when vectorized, is represented by UF vector values in
219     /// the new unrolled loop, where UF is the unroll factor.
220     typedef SmallVector<Value *, 2> PerPartValuesTy;
221 
222     DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
223 
224     using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
225     DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
226   } Data;
227 
228   /// Get the generated Value for a given VPValue and a given Part. Note that
229   /// as some Defs are still created by ILV and managed in its ValueMap, this
230   /// method will delegate the call to ILV in such cases in order to provide
231   /// callers a consistent API.
232   /// \see set.
233   Value *get(VPValue *Def, unsigned Part);
234 
235   /// Get the generated Value for a given VPValue and given Part and Lane.
236   Value *get(VPValue *Def, const VPIteration &Instance);
237 
238   bool hasVectorValue(VPValue *Def, unsigned Part) {
239     auto I = Data.PerPartOutput.find(Def);
240     return I != Data.PerPartOutput.end() && Part < I->second.size() &&
241            I->second[Part];
242   }
243 
244   bool hasAnyVectorValue(VPValue *Def) const {
245     return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
246   }
247 
248   bool hasScalarValue(VPValue *Def, VPIteration Instance) {
249     auto I = Data.PerPartScalars.find(Def);
250     if (I == Data.PerPartScalars.end())
251       return false;
252     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
253     return Instance.Part < I->second.size() &&
254            CacheIdx < I->second[Instance.Part].size() &&
255            I->second[Instance.Part][CacheIdx];
256   }
257 
258   /// Set the generated Value for a given VPValue and a given Part.
259   void set(VPValue *Def, Value *V, unsigned Part) {
260     if (!Data.PerPartOutput.count(Def)) {
261       DataState::PerPartValuesTy Entry(UF);
262       Data.PerPartOutput[Def] = Entry;
263     }
264     Data.PerPartOutput[Def][Part] = V;
265   }
266   /// Reset an existing vector value for \p Def and a given \p Part.
267   void reset(VPValue *Def, Value *V, unsigned Part) {
268     auto Iter = Data.PerPartOutput.find(Def);
269     assert(Iter != Data.PerPartOutput.end() &&
270            "need to overwrite existing value");
271     Iter->second[Part] = V;
272   }
273 
274   /// Set the generated scalar \p V for \p Def and the given \p Instance.
275   void set(VPValue *Def, Value *V, const VPIteration &Instance) {
276     auto Iter = Data.PerPartScalars.insert({Def, {}});
277     auto &PerPartVec = Iter.first->second;
278     while (PerPartVec.size() <= Instance.Part)
279       PerPartVec.emplace_back();
280     auto &Scalars = PerPartVec[Instance.Part];
281     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
282     while (Scalars.size() <= CacheIdx)
283       Scalars.push_back(nullptr);
284     assert(!Scalars[CacheIdx] && "should overwrite existing value");
285     Scalars[CacheIdx] = V;
286   }
287 
288   /// Reset an existing scalar value for \p Def and a given \p Instance.
289   void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
290     auto Iter = Data.PerPartScalars.find(Def);
291     assert(Iter != Data.PerPartScalars.end() &&
292            "need to overwrite existing value");
293     assert(Instance.Part < Iter->second.size() &&
294            "need to overwrite existing value");
295     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
296     assert(CacheIdx < Iter->second[Instance.Part].size() &&
297            "need to overwrite existing value");
298     Iter->second[Instance.Part][CacheIdx] = V;
299   }
300 
301   /// Hold state information used when constructing the CFG of the output IR,
302   /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
303   struct CFGState {
304     /// The previous VPBasicBlock visited. Initially set to null.
305     VPBasicBlock *PrevVPBB = nullptr;
306 
307     /// The previous IR BasicBlock created or used. Initially set to the new
308     /// header BasicBlock.
309     BasicBlock *PrevBB = nullptr;
310 
311     /// The last IR BasicBlock in the output IR. Set to the new latch
312     /// BasicBlock, used for placing the newly created BasicBlocks.
313     BasicBlock *LastBB = nullptr;
314 
315     /// The IR BasicBlock that is the preheader of the vector loop in the output
316     /// IR.
317     /// FIXME: The vector preheader should also be modeled in VPlan, so any code
318     /// that needs to be added to the preheader gets directly generated by
319     /// VPlan. There should be no need to manage a pointer to the IR BasicBlock.
320     BasicBlock *VectorPreHeader = nullptr;
321 
322     /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
323     /// of replication, maps the BasicBlock of the last replica created.
324     SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
325 
326     /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
327     /// up at the end of vector code generation.
328     SmallVector<VPBasicBlock *, 8> VPBBsToFix;
329 
330     CFGState() = default;
331   } CFG;
332 
333   /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
334   LoopInfo *LI;
335 
336   /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
337   DominatorTree *DT;
338 
339   /// Hold a reference to the IRBuilder used to generate output IR code.
340   IRBuilder<> &Builder;
341 
342   VPValue2ValueTy VPValue2Value;
343 
344   /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
345   Value *CanonicalIV = nullptr;
346 
347   /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
348   InnerLoopVectorizer *ILV;
349 
350   /// Pointer to the VPlan code is generated for.
351   VPlan *Plan;
352 
353   /// Holds recipes that may generate a poison value that is used after
354   /// vectorization, even when their operands are not poison.
355   SmallPtrSet<VPRecipeBase *, 16> MayGeneratePoisonRecipes;
356 };
357 
358 /// VPUsers instance used by VPBlockBase to manage CondBit and the block
359 /// predicate. Currently VPBlockUsers are used in VPBlockBase for historical
360 /// reasons, but in the future the only VPUsers should either be recipes or
361 /// live-outs.VPBlockBase uses.
362 struct VPBlockUser : public VPUser {
363   VPBlockUser() : VPUser({}, VPUserID::Block) {}
364 
365   VPValue *getSingleOperandOrNull() {
366     if (getNumOperands() == 1)
367       return getOperand(0);
368 
369     return nullptr;
370   }
371   const VPValue *getSingleOperandOrNull() const {
372     if (getNumOperands() == 1)
373       return getOperand(0);
374 
375     return nullptr;
376   }
377 
378   void resetSingleOpUser(VPValue *NewVal) {
379     assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
380     if (!NewVal) {
381       if (getNumOperands() == 1)
382         removeLastOperand();
383       return;
384     }
385 
386     if (getNumOperands() == 1)
387       setOperand(0, NewVal);
388     else
389       addOperand(NewVal);
390   }
391 };
392 
393 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
394 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
395 class VPBlockBase {
396   friend class VPBlockUtils;
397 
398   const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
399 
400   /// An optional name for the block.
401   std::string Name;
402 
403   /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
404   /// it is a topmost VPBlockBase.
405   VPRegionBlock *Parent = nullptr;
406 
407   /// List of predecessor blocks.
408   SmallVector<VPBlockBase *, 1> Predecessors;
409 
410   /// List of successor blocks.
411   SmallVector<VPBlockBase *, 1> Successors;
412 
413   /// Successor selector managed by a VPUser. For blocks with zero or one
414   /// successors, there is no operand. Otherwise there is exactly one operand
415   /// which is the branch condition.
416   VPBlockUser CondBitUser;
417 
418   /// If the block is predicated, its predicate is stored as an operand of this
419   /// VPUser to maintain the def-use relations. Otherwise there is no operand
420   /// here.
421   VPBlockUser PredicateUser;
422 
423   /// VPlan containing the block. Can only be set on the entry block of the
424   /// plan.
425   VPlan *Plan = nullptr;
426 
427   /// Add \p Successor as the last successor to this block.
428   void appendSuccessor(VPBlockBase *Successor) {
429     assert(Successor && "Cannot add nullptr successor!");
430     Successors.push_back(Successor);
431   }
432 
433   /// Add \p Predecessor as the last predecessor to this block.
434   void appendPredecessor(VPBlockBase *Predecessor) {
435     assert(Predecessor && "Cannot add nullptr predecessor!");
436     Predecessors.push_back(Predecessor);
437   }
438 
439   /// Remove \p Predecessor from the predecessors of this block.
440   void removePredecessor(VPBlockBase *Predecessor) {
441     auto Pos = find(Predecessors, Predecessor);
442     assert(Pos && "Predecessor does not exist");
443     Predecessors.erase(Pos);
444   }
445 
446   /// Remove \p Successor from the successors of this block.
447   void removeSuccessor(VPBlockBase *Successor) {
448     auto Pos = find(Successors, Successor);
449     assert(Pos && "Successor does not exist");
450     Successors.erase(Pos);
451   }
452 
453 protected:
454   VPBlockBase(const unsigned char SC, const std::string &N)
455       : SubclassID(SC), Name(N) {}
456 
457 public:
458   /// An enumeration for keeping track of the concrete subclass of VPBlockBase
459   /// that are actually instantiated. Values of this enumeration are kept in the
460   /// SubclassID field of the VPBlockBase objects. They are used for concrete
461   /// type identification.
462   using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
463 
464   using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
465 
466   virtual ~VPBlockBase() = default;
467 
468   const std::string &getName() const { return Name; }
469 
470   void setName(const Twine &newName) { Name = newName.str(); }
471 
472   /// \return an ID for the concrete type of this object.
473   /// This is used to implement the classof checks. This should not be used
474   /// for any other purpose, as the values may change as LLVM evolves.
475   unsigned getVPBlockID() const { return SubclassID; }
476 
477   VPRegionBlock *getParent() { return Parent; }
478   const VPRegionBlock *getParent() const { return Parent; }
479 
480   /// \return A pointer to the plan containing the current block.
481   VPlan *getPlan();
482   const VPlan *getPlan() const;
483 
484   /// Sets the pointer of the plan containing the block. The block must be the
485   /// entry block into the VPlan.
486   void setPlan(VPlan *ParentPlan);
487 
488   void setParent(VPRegionBlock *P) { Parent = P; }
489 
490   /// \return the VPBasicBlock that is the entry of this VPBlockBase,
491   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
492   /// VPBlockBase is a VPBasicBlock, it is returned.
493   const VPBasicBlock *getEntryBasicBlock() const;
494   VPBasicBlock *getEntryBasicBlock();
495 
496   /// \return the VPBasicBlock that is the exit of this VPBlockBase,
497   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
498   /// VPBlockBase is a VPBasicBlock, it is returned.
499   const VPBasicBlock *getExitBasicBlock() const;
500   VPBasicBlock *getExitBasicBlock();
501 
502   const VPBlocksTy &getSuccessors() const { return Successors; }
503   VPBlocksTy &getSuccessors() { return Successors; }
504 
505   iterator_range<VPBlockBase **> successors() { return Successors; }
506 
507   const VPBlocksTy &getPredecessors() const { return Predecessors; }
508   VPBlocksTy &getPredecessors() { return Predecessors; }
509 
510   /// \return the successor of this VPBlockBase if it has a single successor.
511   /// Otherwise return a null pointer.
512   VPBlockBase *getSingleSuccessor() const {
513     return (Successors.size() == 1 ? *Successors.begin() : nullptr);
514   }
515 
516   /// \return the predecessor of this VPBlockBase if it has a single
517   /// predecessor. Otherwise return a null pointer.
518   VPBlockBase *getSinglePredecessor() const {
519     return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
520   }
521 
522   size_t getNumSuccessors() const { return Successors.size(); }
523   size_t getNumPredecessors() const { return Predecessors.size(); }
524 
525   /// An Enclosing Block of a block B is any block containing B, including B
526   /// itself. \return the closest enclosing block starting from "this", which
527   /// has successors. \return the root enclosing block if all enclosing blocks
528   /// have no successors.
529   VPBlockBase *getEnclosingBlockWithSuccessors();
530 
531   /// \return the closest enclosing block starting from "this", which has
532   /// predecessors. \return the root enclosing block if all enclosing blocks
533   /// have no predecessors.
534   VPBlockBase *getEnclosingBlockWithPredecessors();
535 
536   /// \return the successors either attached directly to this VPBlockBase or, if
537   /// this VPBlockBase is the exit block of a VPRegionBlock and has no
538   /// successors of its own, search recursively for the first enclosing
539   /// VPRegionBlock that has successors and return them. If no such
540   /// VPRegionBlock exists, return the (empty) successors of the topmost
541   /// VPBlockBase reached.
542   const VPBlocksTy &getHierarchicalSuccessors() {
543     return getEnclosingBlockWithSuccessors()->getSuccessors();
544   }
545 
546   /// \return the hierarchical successor of this VPBlockBase if it has a single
547   /// hierarchical successor. Otherwise return a null pointer.
548   VPBlockBase *getSingleHierarchicalSuccessor() {
549     return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
550   }
551 
552   /// \return the predecessors either attached directly to this VPBlockBase or,
553   /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
554   /// predecessors of its own, search recursively for the first enclosing
555   /// VPRegionBlock that has predecessors and return them. If no such
556   /// VPRegionBlock exists, return the (empty) predecessors of the topmost
557   /// VPBlockBase reached.
558   const VPBlocksTy &getHierarchicalPredecessors() {
559     return getEnclosingBlockWithPredecessors()->getPredecessors();
560   }
561 
562   /// \return the hierarchical predecessor of this VPBlockBase if it has a
563   /// single hierarchical predecessor. Otherwise return a null pointer.
564   VPBlockBase *getSingleHierarchicalPredecessor() {
565     return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
566   }
567 
568   /// \return the condition bit selecting the successor.
569   VPValue *getCondBit();
570   /// \return the condition bit selecting the successor.
571   const VPValue *getCondBit() const;
572   /// Set the condition bit selecting the successor.
573   void setCondBit(VPValue *CV);
574 
575   /// \return the block's predicate.
576   VPValue *getPredicate();
577   /// \return the block's predicate.
578   const VPValue *getPredicate() const;
579   /// Set the block's predicate.
580   void setPredicate(VPValue *Pred);
581 
582   /// Set a given VPBlockBase \p Successor as the single successor of this
583   /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
584   /// This VPBlockBase must have no successors.
585   void setOneSuccessor(VPBlockBase *Successor) {
586     assert(Successors.empty() && "Setting one successor when others exist.");
587     appendSuccessor(Successor);
588   }
589 
590   /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
591   /// successors of this VPBlockBase. \p Condition is set as the successor
592   /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
593   /// IfFalse. This VPBlockBase must have no successors.
594   void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
595                         VPValue *Condition) {
596     assert(Successors.empty() && "Setting two successors when others exist.");
597     assert(Condition && "Setting two successors without condition!");
598     setCondBit(Condition);
599     appendSuccessor(IfTrue);
600     appendSuccessor(IfFalse);
601   }
602 
603   /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
604   /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
605   /// as successor of any VPBasicBlock in \p NewPreds.
606   void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
607     assert(Predecessors.empty() && "Block predecessors already set.");
608     for (auto *Pred : NewPreds)
609       appendPredecessor(Pred);
610   }
611 
612   /// Remove all the predecessor of this block.
613   void clearPredecessors() { Predecessors.clear(); }
614 
615   /// Remove all the successors of this block and set to null its condition bit
616   void clearSuccessors() {
617     Successors.clear();
618     setCondBit(nullptr);
619   }
620 
621   /// The method which generates the output IR that correspond to this
622   /// VPBlockBase, thereby "executing" the VPlan.
623   virtual void execute(struct VPTransformState *State) = 0;
624 
625   /// Delete all blocks reachable from a given VPBlockBase, inclusive.
626   static void deleteCFG(VPBlockBase *Entry);
627 
628   /// Return true if it is legal to hoist instructions into this block.
629   bool isLegalToHoistInto() {
630     // There are currently no constraints that prevent an instruction to be
631     // hoisted into a VPBlockBase.
632     return true;
633   }
634 
635   /// Replace all operands of VPUsers in the block with \p NewValue and also
636   /// replaces all uses of VPValues defined in the block with NewValue.
637   virtual void dropAllReferences(VPValue *NewValue) = 0;
638 
639 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
640   void printAsOperand(raw_ostream &OS, bool PrintType) const {
641     OS << getName();
642   }
643 
644   /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
645   /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
646   /// consequtive numbers.
647   ///
648   /// Note that the numbering is applied to the whole VPlan, so printing
649   /// individual blocks is consistent with the whole VPlan printing.
650   virtual void print(raw_ostream &O, const Twine &Indent,
651                      VPSlotTracker &SlotTracker) const = 0;
652 
653   /// Print plain-text dump of this VPlan to \p O.
654   void print(raw_ostream &O) const {
655     VPSlotTracker SlotTracker(getPlan());
656     print(O, "", SlotTracker);
657   }
658 
659   /// Print the successors of this block to \p O, prefixing all lines with \p
660   /// Indent.
661   void printSuccessors(raw_ostream &O, const Twine &Indent) const;
662 
663   /// Dump this VPBlockBase to dbgs().
664   LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
665 #endif
666 };
667 
668 /// VPRecipeBase is a base class modeling a sequence of one or more output IR
669 /// instructions. VPRecipeBase owns the VPValues it defines through VPDef
670 /// and is responsible for deleting its defined values. Single-value
671 /// VPRecipeBases that also inherit from VPValue must make sure to inherit from
672 /// VPRecipeBase before VPValue.
673 class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
674                      public VPDef,
675                      public VPUser {
676   friend VPBasicBlock;
677   friend class VPBlockUtils;
678 
679   /// Each VPRecipe belongs to a single VPBasicBlock.
680   VPBasicBlock *Parent = nullptr;
681 
682 public:
683   VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
684       : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
685 
686   template <typename IterT>
687   VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
688       : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
689   virtual ~VPRecipeBase() = default;
690 
691   /// \return the VPBasicBlock which this VPRecipe belongs to.
692   VPBasicBlock *getParent() { return Parent; }
693   const VPBasicBlock *getParent() const { return Parent; }
694 
695   /// The method which generates the output IR instructions that correspond to
696   /// this VPRecipe, thereby "executing" the VPlan.
697   virtual void execute(struct VPTransformState &State) = 0;
698 
699   /// Insert an unlinked recipe into a basic block immediately before
700   /// the specified recipe.
701   void insertBefore(VPRecipeBase *InsertPos);
702 
703   /// Insert an unlinked Recipe into a basic block immediately after
704   /// the specified Recipe.
705   void insertAfter(VPRecipeBase *InsertPos);
706 
707   /// Unlink this recipe from its current VPBasicBlock and insert it into
708   /// the VPBasicBlock that MovePos lives in, right after MovePos.
709   void moveAfter(VPRecipeBase *MovePos);
710 
711   /// Unlink this recipe and insert into BB before I.
712   ///
713   /// \pre I is a valid iterator into BB.
714   void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);
715 
716   /// This method unlinks 'this' from the containing basic block, but does not
717   /// delete it.
718   void removeFromParent();
719 
720   /// This method unlinks 'this' from the containing basic block and deletes it.
721   ///
722   /// \returns an iterator pointing to the element after the erased one
723   iplist<VPRecipeBase>::iterator eraseFromParent();
724 
725   /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
726   /// otherwise.
727   Instruction *getUnderlyingInstr() {
728     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
729   }
730   const Instruction *getUnderlyingInstr() const {
731     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
732   }
733 
734   /// Method to support type inquiry through isa, cast, and dyn_cast.
735   static inline bool classof(const VPDef *D) {
736     // All VPDefs are also VPRecipeBases.
737     return true;
738   }
739 
740   static inline bool classof(const VPUser *U) {
741     return U->getVPUserID() == VPUser::VPUserID::Recipe;
742   }
743 
744   /// Returns true if the recipe may have side-effects.
745   bool mayHaveSideEffects() const;
746 
747   /// Returns true for PHI-like recipes.
748   bool isPhi() const {
749     return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
750   }
751 
752   /// Returns true if the recipe may read from memory.
753   bool mayReadFromMemory() const;
754 
755   /// Returns true if the recipe may write to memory.
756   bool mayWriteToMemory() const;
757 
758   /// Returns true if the recipe may read from or write to memory.
759   bool mayReadOrWriteMemory() const {
760     return mayReadFromMemory() || mayWriteToMemory();
761   }
762 
763   /// Returns true if the recipe only uses the first lane of operand \p Op.
764   /// Conservatively returns false.
765   virtual bool onlyFirstLaneUsed(const VPValue *Op) const {
766     assert(is_contained(operands(), Op) &&
767            "Op must be an operand of the recipe");
768     return false;
769   }
770 };
771 
772 inline bool VPUser::classof(const VPDef *Def) {
773   return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
774          Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
775          Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
776          Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
777          Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
778          Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
779          Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
780          Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
781          Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
782          Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
783          Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
784 }
785 
786 /// This is a concrete Recipe that models a single VPlan-level instruction.
787 /// While as any Recipe it may generate a sequence of IR instructions when
788 /// executed, these instructions would always form a single-def expression as
789 /// the VPInstruction is also a single def-use vertex.
790 class VPInstruction : public VPRecipeBase, public VPValue {
791   friend class VPlanSlp;
792 
793 public:
794   /// VPlan opcodes, extending LLVM IR with idiomatics instructions.
795   enum {
796     FirstOrderRecurrenceSplice =
797         Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
798                                       // values of a first-order recurrence.
799     Not,
800     ICmpULE,
801     SLPLoad,
802     SLPStore,
803     ActiveLaneMask,
804     CanonicalIVIncrement,
805     CanonicalIVIncrementNUW,
806     BranchOnCount,
807   };
808 
809 private:
810   typedef unsigned char OpcodeTy;
811   OpcodeTy Opcode;
812   FastMathFlags FMF;
813   DebugLoc DL;
814 
815   /// Utility method serving execute(): generates a single instance of the
816   /// modeled instruction.
817   void generateInstruction(VPTransformState &State, unsigned Part);
818 
819 protected:
820   void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }
821 
822 public:
823   VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands, DebugLoc DL)
824       : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
825         VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode),
826         DL(DL) {}
827 
828   VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
829                 DebugLoc DL = {})
830       : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands), DL) {}
831 
832   /// Method to support type inquiry through isa, cast, and dyn_cast.
833   static inline bool classof(const VPValue *V) {
834     return V->getVPValueID() == VPValue::VPVInstructionSC;
835   }
836 
837   VPInstruction *clone() const {
838     SmallVector<VPValue *, 2> Operands(operands());
839     return new VPInstruction(Opcode, Operands, DL);
840   }
841 
842   /// Method to support type inquiry through isa, cast, and dyn_cast.
843   static inline bool classof(const VPDef *R) {
844     return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
845   }
846 
847   /// Extra classof implementations to allow directly casting from VPUser ->
848   /// VPInstruction.
849   static inline bool classof(const VPUser *U) {
850     auto *R = dyn_cast<VPRecipeBase>(U);
851     return R && R->getVPDefID() == VPRecipeBase::VPInstructionSC;
852   }
853   static inline bool classof(const VPRecipeBase *R) {
854     return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
855   }
856 
857   unsigned getOpcode() const { return Opcode; }
858 
859   /// Generate the instruction.
860   /// TODO: We currently execute only per-part unless a specific instance is
861   /// provided.
862   void execute(VPTransformState &State) override;
863 
864 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
865   /// Print the VPInstruction to \p O.
866   void print(raw_ostream &O, const Twine &Indent,
867              VPSlotTracker &SlotTracker) const override;
868 
869   /// Print the VPInstruction to dbgs() (for debugging).
870   LLVM_DUMP_METHOD void dump() const;
871 #endif
872 
873   /// Return true if this instruction may modify memory.
874   bool mayWriteToMemory() const {
875     // TODO: we can use attributes of the called function to rule out memory
876     //       modifications.
877     return Opcode == Instruction::Store || Opcode == Instruction::Call ||
878            Opcode == Instruction::Invoke || Opcode == SLPStore;
879   }
880 
881   bool hasResult() const {
882     // CallInst may or may not have a result, depending on the called function.
883     // Conservatively return calls have results for now.
884     switch (getOpcode()) {
885     case Instruction::Ret:
886     case Instruction::Br:
887     case Instruction::Store:
888     case Instruction::Switch:
889     case Instruction::IndirectBr:
890     case Instruction::Resume:
891     case Instruction::CatchRet:
892     case Instruction::Unreachable:
893     case Instruction::Fence:
894     case Instruction::AtomicRMW:
895     case VPInstruction::BranchOnCount:
896       return false;
897     default:
898       return true;
899     }
900   }
901 
902   /// Set the fast-math flags.
903   void setFastMathFlags(FastMathFlags FMFNew);
904 
905   /// Returns true if the recipe only uses the first lane of operand \p Op.
906   bool onlyFirstLaneUsed(const VPValue *Op) const override {
907     assert(is_contained(operands(), Op) &&
908            "Op must be an operand of the recipe");
909     if (getOperand(0) != Op)
910       return false;
911     switch (getOpcode()) {
912     default:
913       return false;
914     case VPInstruction::ActiveLaneMask:
915     case VPInstruction::CanonicalIVIncrement:
916     case VPInstruction::CanonicalIVIncrementNUW:
917     case VPInstruction::BranchOnCount:
918       return true;
919     };
920     llvm_unreachable("switch should return");
921   }
922 };
923 
/// VPWidenRecipe is a recipe for producing a copy of vector type its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create a widening recipe for \p I, using \p Operands as the recipe's
  /// operands; \p I is recorded as the recipe's underlying value.
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
        VPValue(VPValue::VPVWidenSC, &I, this) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
953 
/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {

public:
  /// Create a widening recipe for call \p I, using \p CallArguments as the
  /// recipe's operands; \p I is recorded as the recipe's underlying value.
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
      : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
        VPValue(VPValue::VPVWidenCallSC, &I, this) {}

  ~VPWidenCallRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
  }

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
979 
/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {

  /// Is the condition of the select loop invariant?
  bool InvariantCond;

public:
  /// Create a widening recipe for select \p I, using \p Operands as the
  /// recipe's operands. \p InvariantCond records whether the select's
  /// condition is loop invariant.
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
                      bool InvariantCond)
      : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
        VPValue(VPValue::VPVWidenSelectSC, &I, this),
        InvariantCond(InvariantCond) {}

  ~VPWidenSelectRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
  }

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1010 
1011 /// A recipe for handling GEP instructions.
1012 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
1013   bool IsPtrLoopInvariant;
1014   SmallBitVector IsIndexLoopInvariant;
1015 
1016 public:
1017   template <typename IterT>
1018   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
1019       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1020         VPValue(VPWidenGEPSC, GEP, this),
1021         IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
1022 
1023   template <typename IterT>
1024   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
1025                    Loop *OrigLoop)
1026       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
1027         VPValue(VPValue::VPVWidenGEPSC, GEP, this),
1028         IsIndexLoopInvariant(GEP->getNumIndices(), false) {
1029     IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
1030     for (auto Index : enumerate(GEP->indices()))
1031       IsIndexLoopInvariant[Index.index()] =
1032           OrigLoop->isLoopInvariant(Index.value().get());
1033   }
1034   ~VPWidenGEPRecipe() override = default;
1035 
1036   /// Method to support type inquiry through isa, cast, and dyn_cast.
1037   static inline bool classof(const VPDef *D) {
1038     return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
1039   }
1040 
1041   /// Generate the gep nodes.
1042   void execute(VPTransformState &State) override;
1043 
1044 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1045   /// Print the recipe.
1046   void print(raw_ostream &O, const Twine &Indent,
1047              VPSlotTracker &SlotTracker) const override;
1048 #endif
1049 };
1050 
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase, public VPValue {
  // The original induction phi in the scalar loop.
  PHINode *IV;
  // Descriptor of the induction modeled by this recipe.
  const InductionDescriptor &IndDesc;
  // Whether a scalar phi needs to be created for the induction.
  bool NeedsScalarIV;
  // Whether a vector phi needs to be created for the induction.
  bool NeedsVectorIV;

public:
  /// Create a recipe for induction phi \p IV starting at \p Start; the phi
  /// itself is recorded as the recipe's underlying value.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start,
                                const InductionDescriptor &IndDesc,
                                bool NeedsScalarIV, bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), VPValue(IV, this),
        IV(IV), IndDesc(IndDesc), NeedsScalarIV(NeedsScalarIV),
        NeedsVectorIV(NeedsVectorIV) {}

  /// As above, but the truncate \p Trunc of the induction is recorded as the
  /// recipe's underlying value instead of \p IV.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start,
                                const InductionDescriptor &IndDesc,
                                TruncInst *Trunc, bool NeedsScalarIV,
                                bool NeedsVectorIV)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), VPValue(Trunc, this),
        IV(IV), IndDesc(IndDesc), NeedsScalarIV(NeedsScalarIV),
        NeedsVectorIV(NeedsVectorIV) {}

  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }
  const VPValue *getStartValue() const { return getOperand(0); }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }

  /// Returns the induction descriptor for the recipe.
  const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }

  /// Returns true if the induction is canonical, i.e. starting at 0 and
  /// incremented by UF * VF (= the original IV is incremented by 1).
  bool isCanonical() const;

  /// Returns the scalar type of the induction: the truncated type when a
  /// truncate is associated with the recipe, the phi's type otherwise.
  const Type *getScalarType() const {
    const TruncInst *TruncI = getTruncInst();
    return TruncI ? TruncI->getType() : IV->getType();
  }

  /// Returns true if a scalar phi needs to be created for the induction.
  bool needsScalarIV() const { return NeedsScalarIV; }

  /// Returns true if a vector phi needs to be created for the induction.
  bool needsVectorIV() const { return NeedsVectorIV; }
};
1124 
/// A pure virtual base class for all recipes modeling header phis, including
/// phis for first order recurrences, pointer inductions and reductions. The
/// start value is the first operand of the recipe and the incoming value from
/// the backedge is the second operand.
class VPHeaderPHIRecipe : public VPRecipeBase, public VPValue {
protected:
  /// Create a header-phi recipe with value ID \p VPVID and def ID \p VPDefID
  /// for phi \p Phi. If \p Start is provided it becomes operand 0.
  VPHeaderPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
                    VPValue *Start = nullptr)
      : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
    if (Start)
      addOperand(Start);
  }

public:
  ~VPHeaderPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPCanonicalIVPHISC ||
           B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
           B->getVPDefID() == VPRecipeBase::VPReductionPHISC ||
           B->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC ||
           B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC ||
           V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
           V->getVPValueID() == VPValue::VPVReductionPHISC ||
           V->getVPValueID() == VPValue::VPVWidenIntOrFpInductionSC ||
           V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi nodes.
  void execute(VPTransformState &State) override = 0;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override = 0;
#endif

  /// Returns the start value of the phi, if one is set.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge. Assumes the backedge
  /// value has already been added as operand 1.
  VPValue *getBackedgeValue() {
    return getOperand(1);
  }

  /// Returns the backedge value as a recipe. The backedge value is guaranteed
  /// to be a recipe.
  VPRecipeBase *getBackedgeRecipe() {
    return cast<VPRecipeBase>(getBackedgeValue()->getDef());
  }
};
1182 
/// A recipe for handling header phis that are widened in the vector loop.
/// In the VPlan native path, all incoming VPValues & VPBasicBlock pairs are
/// managed in the recipe directly.
class VPWidenPHIRecipe : public VPHeaderPHIRecipe {
  /// List of incoming blocks. Only used in the VPlan native path.
  SmallVector<VPBasicBlock *, 2> IncomingBlocks;

public:
  /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
  VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr)
      : VPHeaderPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {
    if (Start)
      addOperand(Start);
  }

  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
  void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
    addOperand(IncomingV);
    IncomingBlocks.push_back(IncomingBlock);
  }

  /// Returns the \p I th incoming VPBasicBlock.
  VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }

  /// Returns the \p I th incoming VPValue.
  /// NOTE(review): indexes operands directly; when a start value was added as
  /// operand 0, incoming values appear offset by one relative to
  /// IncomingBlocks -- confirm callers account for this.
  VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
};
1232 
/// A recipe for handling first-order recurrence phis. The start value is the
/// first operand of the recipe and the incoming value from the backedge is the
/// second operand.
struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe {
  /// Create a recurrence-phi recipe for \p Phi with start value \p Start
  /// (added as operand 0 by the base class).
  VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
      : VPHeaderPHIRecipe(VPVFirstOrderRecurrencePHISC,
                          VPFirstOrderRecurrencePHISC, Phi, &Start) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
  }

  /// Generate the phi nodes (see VPHeaderPHIRecipe::execute).
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1260 
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
class VPReductionPHIRecipe : public VPHeaderPHIRecipe {
  /// Descriptor for the reduction.
  const RecurrenceDescriptor &RdxDesc;

  /// The phi is part of an in-loop reduction.
  bool IsInLoop;

  /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
  bool IsOrdered;

public:
  /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPReductionPHIRecipe(PHINode *Phi, const RecurrenceDescriptor &RdxDesc,
                       VPValue &Start, bool IsInLoop = false,
                       bool IsOrdered = false)
      : VPHeaderPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
        RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
    assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
  }

  ~VPReductionPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPHeaderPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the descriptor of the reduction modeled by this phi.
  const RecurrenceDescriptor &getRecurrenceDescriptor() const {
    return RdxDesc;
  }

  /// Returns true, if the phi is part of an ordered reduction.
  bool isOrdered() const { return IsOrdered; }

  /// Returns true, if the phi is part of an in-loop reduction.
  bool isInLoop() const { return IsInLoop; }
};
1317 
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  /// The original phi node being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx.
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx. Assumes operands follow the [I0, M0, I1, M1,
  /// ...] layout, i.e. a mask is present for each incoming value.
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    // Recursing through Blend recipes only, must terminate at header phi's the
    // latest.
    return all_of(users(), [this](VPUser *U) {
      return cast<VPRecipeBase>(U)->onlyFirstLaneUsed(this);
    });
  }
};
1371 
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  /// The interleave group modeled by this recipe.
  const InterleaveGroup<Instruction> *IG;

  /// True if an optional mask was appended as the last operand.
  bool HasMask = false;

public:
  /// Create a recipe for \p IG accessing \p Addr, storing \p StoredValues,
  /// optionally masked by \p Mask (nullptr means no mask).
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    // Define a VPValue for each group member that produces a result, i.e.
    // skip members of void type (such as stores).
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last, currently 2nd operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumStoreOperands());
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the interleave group modeled by this recipe.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }

  /// Returns the number of stored operands of this interleave group. Returns 0
  /// for load interleave groups.
  unsigned getNumStoreOperands() const {
    return getNumOperands() - (HasMask ? 2 : 1);
  }
};
1444 
/// A recipe to represent inloop reduction operations, performing a reduction on
/// a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  const RecurrenceDescriptor *RdxDesc;
  /// Pointer to the TTI, needed to create the target reduction
  const TargetTransformInfo *TTI;

public:
  /// Create a reduction recipe for \p I described by \p R, accumulating
  /// \p VecOp into \p ChainOp, optionally guarded by condition \p CondOp.
  VPReductionRecipe(const RecurrenceDescriptor *R, Instruction *I,
                    VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  /// Generate the reduction in the loop
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};
1489 
1490 /// VPReplicateRecipe replicates a given instruction producing multiple scalar
1491 /// copies of the original scalar type, one per lane, instead of producing a
1492 /// single copy of widened type for all lanes. If the instruction is known to be
1493 /// uniform only one copy, per lane zero, will be generated.
1494 class VPReplicateRecipe : public VPRecipeBase, public VPValue {
1495   /// Indicator if only a single replica per lane is needed.
1496   bool IsUniform;
1497 
1498   /// Indicator if the replicas are also predicated.
1499   bool IsPredicated;
1500 
1501   /// Indicator if the scalar values should also be packed into a vector.
1502   bool AlsoPack;
1503 
1504 public:
1505   template <typename IterT>
1506   VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
1507                     bool IsUniform, bool IsPredicated = false)
1508       : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
1509         IsUniform(IsUniform), IsPredicated(IsPredicated) {
1510     // Retain the previous behavior of predicateInstructions(), where an
1511     // insert-element of a predicated instruction got hoisted into the
1512     // predicated basic block iff it was its only user. This is achieved by
1513     // having predicated instructions also pack their values into a vector by
1514     // default unless they have a replicated user which uses their scalar value.
1515     AlsoPack = IsPredicated && !I->use_empty();
1516   }
1517 
1518   ~VPReplicateRecipe() override = default;
1519 
1520   /// Method to support type inquiry through isa, cast, and dyn_cast.
1521   static inline bool classof(const VPDef *D) {
1522     return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
1523   }
1524 
1525   static inline bool classof(const VPValue *V) {
1526     return V->getVPValueID() == VPValue::VPVReplicateSC;
1527   }
1528 
1529   /// Generate replicas of the desired Ingredient. Replicas will be generated
1530   /// for all parts and lanes unless a specific part and lane are specified in
1531   /// the \p State.
1532   void execute(VPTransformState &State) override;
1533 
1534   void setAlsoPack(bool Pack) { AlsoPack = Pack; }
1535 
1536 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1537   /// Print the recipe.
1538   void print(raw_ostream &O, const Twine &Indent,
1539              VPSlotTracker &SlotTracker) const override;
1540 #endif
1541 
1542   bool isUniform() const { return IsUniform; }
1543 
1544   bool isPacked() const { return AlsoPack; }
1545 
1546   bool isPredicated() const { return IsPredicated; }
1547 
  /// Returns true if the recipe only uses the first lane of operand \p Op.
  /// A uniform replicate recipe emits a single scalar replica per part, so it
  /// only ever demands lane 0 of any operand.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return isUniform();
  }
1554 };
1555 
/// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase {
public:
  /// Create a recipe branching on \p BlockInMask. A nullptr mask stands for an
  /// all-one mask and is not recorded as an operand.
  VPBranchOnMaskRecipe(VPValue *BlockInMask)
      : VPRecipeBase(VPBranchOnMaskSC, {}) {
    if (BlockInMask) // nullptr means all-one mask.
      addOperand(BlockInMask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
  }

  /// Generate the extraction of the appropriate bit from the block mask and the
  /// conditional branch.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override {
    O << Indent << "BRANCH-ON-MASK ";
    if (VPValue *Mask = getMask())
      Mask->printAsOperand(O, SlotTracker);
    else
      O << " All-One";
  }
#endif

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
    // Mask is optional; when absent an all-one mask is implied.
    return getNumOperands() == 1 ? getOperand(0) : nullptr;
  }
};
1594 
/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a VPPredInstPHIRecipe given \p PredV, the predicated value that
  /// needs phi nodes after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC, PredV),
        VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1623 
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase, public VPValue {
  /// The underlying load or store instruction being widened.
  Instruction &Ingredient;

  // Whether the loaded-from / stored-to addresses are consecutive.
  bool Consecutive;

  // Whether the consecutive loaded/stored addresses are in reverse order.
  bool Reverse;

  /// Append \p Mask as the last operand. A nullptr (all-one) mask is not
  /// stored at all; see isMasked().
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// Returns true if an explicit mask operand is present: stores have two
  /// mandatory operands (address, stored value), loads have one (address).
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}),
        VPValue(VPValue::VPVMemoryInstructionSC, &Load, this), Ingredient(Load),
        Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        VPValue(VPValue::VPVMemoryInstructionSC, &Store, this),
        Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  // Return whether the loaded-from / stored-to addresses are consecutive.
  bool isConsecutive() const { return Consecutive; }

  // Return whether the consecutive loaded/stored addresses are in reverse
  // order.
  bool isReverse() const { return Reverse; }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");

    // Widened, consecutive memory operations only demand the first lane of
    // their address.
    return Op == getAddr() && isConsecutive();
  }
};
1721 
/// Canonical scalar induction phi of the vector loop. Starting at the specified
/// start value (either 0 or the resume value when vectorizing the epilogue
/// loop). VPWidenCanonicalIVRecipe represents the vector version of the
/// canonical induction variable.
class VPCanonicalIVPHIRecipe : public VPHeaderPHIRecipe {
  /// Debug location for the canonical IV; presumably attached to the generated
  /// phi by the out-of-line execute() — confirm there.
  DebugLoc DL;

public:
  /// Create a canonical IV phi starting at \p StartV (the single operand).
  VPCanonicalIVPHIRecipe(VPValue *StartV, DebugLoc DL)
      : VPHeaderPHIRecipe(VPValue::VPVCanonicalIVPHISC, VPCanonicalIVPHISC,
                          nullptr, StartV),
        DL(DL) {}

  ~VPCanonicalIVPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPCanonicalIVPHISC;
  }

  /// Generate the canonical scalar induction phi of the vector loop.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the scalar type of the induction, taken from the type of the
  /// live-in IR start value (operand 0).
  const Type *getScalarType() const {
    return getOperand(0)->getLiveInIRValue()->getType();
  }

  /// Returns true if the recipe only uses the first lane of operand \p Op.
  /// The canonical IV is scalar, so only lane 0 of the start value is read.
  bool onlyFirstLaneUsed(const VPValue *Op) const override {
    assert(is_contained(operands(), Op) &&
           "Op must be an operand of the recipe");
    return true;
  }
};
1763 
/// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create a recipe widening the scalar canonical IV \p CanonicalIV, which
  /// becomes the single operand.
  VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe *CanonicalIV)
      : VPRecipeBase(VPWidenCanonicalIVSC, {CanonicalIV}),
        VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}

  ~VPWidenCanonicalIVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Extra classof implementations to allow directly casting from VPUser ->
  /// VPWidenCanonicalIVRecipe.
  static inline bool classof(const VPUser *U) {
    auto *R = dyn_cast<VPRecipeBase>(U);
    return R && R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Generate a canonical vector induction variable of the vector loop, with
  /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
  /// step = <VF*UF, VF*UF, ..., VF*UF>.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the scalar type of the induction, forwarded from the canonical
  /// IV phi recipe defining operand 0.
  const Type *getScalarType() const {
    return cast<VPCanonicalIVPHIRecipe>(getOperand(0)->getDef())
        ->getScalarType();
  }
};
1805 
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipe's each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI
/// recipes.
class VPBasicBlock : public VPBlockBase {
public:
  using RecipeListTy = iplist<VPRecipeBase>;

private:
  /// The VPRecipes held in the order of output instructions to generate.
  RecipeListTy Recipes;

public:
  VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
      : VPBlockBase(VPBasicBlockSC, Name.str()) {
    if (Recipe)
      appendRecipe(Recipe);
  }

  ~VPBasicBlock() override {
    // iplist owns its nodes; popping deletes each recipe.
    while (!Recipes.empty())
      Recipes.pop_back();
  }

  /// Instruction iterators...
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  /// Insert \p Recipe before \p InsertPt, taking ownership and making this
  /// block the recipe's parent. \p Recipe must not already belong to a block.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};
1914 
/// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
/// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
/// A VPRegionBlock may indicate that its contents are to be replicated several
/// times. This is designed to support predicated scalarization, in which a
/// scalar if-then code structure needs to be generated VF * UF times. Having
/// this replication indicator helps to keep a single model for multiple
/// candidate VF's. The actual replication takes place only once the desired VF
/// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  /// Construct a region with known \p Entry and \p Exit blocks; \p Entry must
  /// have no predecessors and \p Exit no successors, and both get this region
  /// as their parent.
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  /// Construct an empty region; entry/exit are filled in later via setEntry()
  /// and setExit().
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  ~VPRegionBlock() override {
    if (Entry) {
      // Break def-use references inside the region before deleting its CFG.
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif
};
2013 
2014 //===----------------------------------------------------------------------===//
2015 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs     //
2016 //===----------------------------------------------------------------------===//
2017 
// The following set of template specializations implement GraphTraits to treat
// any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
// VPBlockBase is a VPRegionBlock, this specialization provides access to its
// successors/predecessors but not to the blocks inside the region.
2023 
/// Shallow GraphTraits for VPBlockBase: a node's children are its direct
/// successors; regions are treated as opaque nodes (no recursion inside).
template <> struct GraphTraits<VPBlockBase *> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
2038 
/// Const variant of the shallow VPBlockBase GraphTraits above.
template <> struct GraphTraits<const VPBlockBase *> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
2053 
// Inverse order specialization for VPBlockBases. Predecessors are used instead
// of successors for the inverse traversal.
template <> struct GraphTraits<Inverse<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getPredecessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getPredecessors().end();
  }
};
2070 
// The following set of template specializations implement GraphTraits to
// treat VPRegionBlock as a graph and recurse inside its nodes. It's important
// to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
// (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
// there won't be automatic recursion into other VPBlockBases that turn out to
// be VPRegionBlocks.
2077 
/// GraphTraits treating a VPRegionBlock as a graph of VPBlockBases: the entry
/// node is the region's entry block, and node iteration is a depth-first walk
/// from there.
template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
2095 
/// Const variant of the VPRegionBlock-as-graph traits above.
template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
2114 
/// Inverse traversal of a VPRegionBlock-as-graph: the entry node is the
/// region's exit block, and edges follow predecessors.
template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
2135 
2136 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
2137 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
2138 /// parent region's successors. This ensures all blocks in a region are visited
2139 /// before any blocks in a successor region when doing a reverse post-order
2140 // traversal of the graph.
2141 template <typename BlockPtrTy>
2142 class VPAllSuccessorsIterator
2143     : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
2144                                   std::forward_iterator_tag, VPBlockBase> {
2145   BlockPtrTy Block;
2146   /// Index of the current successor. For VPBasicBlock nodes, this simply is the
2147   /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
2148   /// used for the region's entry block, and SuccessorIdx - 1 are the indices
2149   /// for the successor array.
2150   size_t SuccessorIdx;
2151 
2152   static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
2153     while (Current && Current->getNumSuccessors() == 0)
2154       Current = Current->getParent();
2155     return Current;
2156   }
2157 
2158   /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
2159   /// both the const and non-const operator* implementations.
2160   template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
2161     if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
2162       if (SuccIdx == 0)
2163         return R->getEntry();
2164       SuccIdx--;
2165     }
2166 
2167     // For exit blocks, use the next parent region with successors.
2168     return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
2169   }
2170 
2171 public:
2172   VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
2173       : Block(Block), SuccessorIdx(Idx) {}
2174   VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
2175       : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
2176 
2177   VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
2178     Block = R.Block;
2179     SuccessorIdx = R.SuccessorIdx;
2180     return *this;
2181   }
2182 
2183   static VPAllSuccessorsIterator end(BlockPtrTy Block) {
2184     BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
2185     unsigned NumSuccessors = ParentWithSuccs
2186                                  ? ParentWithSuccs->getNumSuccessors()
2187                                  : Block->getNumSuccessors();
2188 
2189     if (auto *R = dyn_cast<VPRegionBlock>(Block))
2190       return {R, NumSuccessors + 1};
2191     return {Block, NumSuccessors};
2192   }
2193 
2194   bool operator==(const VPAllSuccessorsIterator &R) const {
2195     return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
2196   }
2197 
2198   const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
2199 
2200   BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
2201 
2202   VPAllSuccessorsIterator &operator++() {
2203     SuccessorIdx++;
2204     return *this;
2205   }
2206 
2207   VPAllSuccessorsIterator operator++(int X) {
2208     VPAllSuccessorsIterator Orig = *this;
2209     SuccessorIdx++;
2210     return Orig;
2211   }
2212 };
2213 
/// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  /// The block the traversal starts from.
  BlockTy Entry;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy Entry) : Entry(Entry) {}
  BlockTy getEntry() { return Entry; }
};
2222 
2223 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
2224 /// including traversing through VPRegionBlocks.  Exit blocks of a region
2225 /// implicitly have their parent region's successors. This ensures all blocks in
2226 /// a region are visited before any blocks in a successor region when doing a
2227 /// reverse post-order traversal of the graph.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  // Children are enumerated via VPAllSuccessorsIterator, which also descends
  // into region entry blocks and escalates region exits to parent successors.
  using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
    return N.getEntry();
  }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2246 
/// Const variant of the recursive-traversal GraphTraits above.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
    return N.getEntry();
  }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2265 
/// VPlan models a candidate for vectorization, encoding various decisions taken
/// to produce efficient output IR, including which branches, basic-blocks and
/// output IR instructions to generate, and their cost. VPlan holds a
/// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
/// VPBlock.
2271 class VPlan {
2272   friend class VPlanPrinter;
2273   friend class VPSlotTracker;
2274 
2275   /// Hold the single entry to the Hierarchical CFG of the VPlan.
2276   VPBlockBase *Entry;
2277 
2278   /// Holds the VFs applicable to this VPlan.
2279   SmallSetVector<ElementCount, 2> VFs;
2280 
2281   /// Holds the name of the VPlan, for printing.
2282   std::string Name;
2283 
2284   /// Holds all the external definitions created for this VPlan.
2285   // TODO: Introduce a specific representation for external definitions in
2286   // VPlan. External definitions must be immutable and hold a pointer to its
2287   // underlying IR that will be used to implement its structural comparison
2288   // (operators '==' and '<').
2289   SetVector<VPValue *> VPExternalDefs;
2290 
2291   /// Represents the trip count of the original loop, for folding
2292   /// the tail.
2293   VPValue *TripCount = nullptr;
2294 
2295   /// Represents the backedge taken count of the original loop, for folding
2296   /// the tail. It equals TripCount - 1.
2297   VPValue *BackedgeTakenCount = nullptr;
2298 
2299   /// Represents the vector trip count.
2300   VPValue VectorTripCount;
2301 
2302   /// Holds a mapping between Values and their corresponding VPValue inside
2303   /// VPlan.
2304   Value2VPValueTy Value2VPValue;
2305 
2306   /// Contains all VPValues that been allocated by addVPValue directly and need
2307   /// to be free when the plan's destructor is called.
2308   SmallVector<VPValue *, 16> VPValuesToFree;
2309 
2310   /// Holds the VPLoopInfo analysis for this VPlan.
2311   VPLoopInfo VPLInfo;
2312 
2313   /// Indicates whether it is safe use the Value2VPValue mapping or if the
2314   /// mapping cannot be used any longer, because it is stale.
2315   bool Value2VPValueEnabled = true;
2316 
2317 public:
2318   VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2319     if (Entry)
2320       Entry->setPlan(this);
2321   }
2322 
2323   ~VPlan() {
2324     if (Entry) {
2325       VPValue DummyValue;
2326       for (VPBlockBase *Block : depth_first(Entry))
2327         Block->dropAllReferences(&DummyValue);
2328 
2329       VPBlockBase::deleteCFG(Entry);
2330     }
2331     for (VPValue *VPV : VPValuesToFree)
2332       delete VPV;
2333     if (TripCount)
2334       delete TripCount;
2335     if (BackedgeTakenCount)
2336       delete BackedgeTakenCount;
2337     for (VPValue *Def : VPExternalDefs)
2338       delete Def;
2339   }
2340 
2341   /// Prepare the plan for execution, setting up the required live-in values.
2342   void prepareToExecute(Value *TripCount, Value *VectorTripCount,
2343                         Value *CanonicalIVStartValue, VPTransformState &State);
2344 
2345   /// Generate the IR code for this VPlan.
2346   void execute(struct VPTransformState *State);
2347 
2348   VPBlockBase *getEntry() { return Entry; }
2349   const VPBlockBase *getEntry() const { return Entry; }
2350 
2351   VPBlockBase *setEntry(VPBlockBase *Block) {
2352     Entry = Block;
2353     Block->setPlan(this);
2354     return Entry;
2355   }
2356 
2357   /// The trip count of the original loop.
2358   VPValue *getOrCreateTripCount() {
2359     if (!TripCount)
2360       TripCount = new VPValue();
2361     return TripCount;
2362   }
2363 
2364   /// The backedge taken count of the original loop.
2365   VPValue *getOrCreateBackedgeTakenCount() {
2366     if (!BackedgeTakenCount)
2367       BackedgeTakenCount = new VPValue();
2368     return BackedgeTakenCount;
2369   }
2370 
2371   /// The vector trip count.
2372   VPValue &getVectorTripCount() { return VectorTripCount; }
2373 
2374   /// Mark the plan to indicate that using Value2VPValue is not safe any
2375   /// longer, because it may be stale.
2376   void disableValue2VPValue() { Value2VPValueEnabled = false; }
2377 
2378   void addVF(ElementCount VF) { VFs.insert(VF); }
2379 
2380   bool hasVF(ElementCount VF) { return VFs.count(VF); }
2381 
2382   const std::string &getName() const { return Name; }
2383 
2384   void setName(const Twine &newName) { Name = newName.str(); }
2385 
2386   /// Add \p VPVal to the pool of external definitions if it's not already
2387   /// in the pool.
2388   void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
2389 
2390   void addVPValue(Value *V) {
2391     assert(Value2VPValueEnabled &&
2392            "IR value to VPValue mapping may be out of date!");
2393     assert(V && "Trying to add a null Value to VPlan");
2394     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2395     VPValue *VPV = new VPValue(V);
2396     Value2VPValue[V] = VPV;
2397     VPValuesToFree.push_back(VPV);
2398   }
2399 
2400   void addVPValue(Value *V, VPValue *VPV) {
2401     assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
2402     assert(V && "Trying to add a null Value to VPlan");
2403     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2404     Value2VPValue[V] = VPV;
2405   }
2406 
2407   /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
2408   /// checking whether it is safe to query VPValues using IR Values.
2409   VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
2410     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2411            "Value2VPValue mapping may be out of date!");
2412     assert(V && "Trying to get the VPValue of a null Value");
2413     assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2414     return Value2VPValue[V];
2415   }
2416 
2417   /// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
2418   /// OverrideAllowed can be used to disable checking whether it is safe to
2419   /// query VPValues using IR Values.
2420   VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
2421     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2422            "Value2VPValue mapping may be out of date!");
2423     assert(V && "Trying to get or add the VPValue of a null Value");
2424     if (!Value2VPValue.count(V))
2425       addVPValue(V);
2426     return getVPValue(V);
2427   }
2428 
2429   void removeVPValueFor(Value *V) {
2430     assert(Value2VPValueEnabled &&
2431            "IR value to VPValue mapping may be out of date!");
2432     Value2VPValue.erase(V);
2433   }
2434 
2435   /// Return the VPLoopInfo analysis for this VPlan.
2436   VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2437   const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2438 
2439 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2440   /// Print this VPlan to \p O.
2441   void print(raw_ostream &O) const;
2442 
2443   /// Print this VPlan in DOT format to \p O.
2444   void printDOT(raw_ostream &O) const;
2445 
2446   /// Dump the plan to stderr (for debugging).
2447   LLVM_DUMP_METHOD void dump() const;
2448 #endif
2449 
2450   /// Returns a range mapping the values the range \p Operands to their
2451   /// corresponding VPValues.
2452   iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2453   mapToVPValues(User::op_range Operands) {
2454     std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2455       return getOrAddVPValue(Op);
2456     };
2457     return map_range(Operands, Fn);
2458   }
2459 
2460   /// Returns true if \p VPV is uniform after vectorization.
2461   bool isUniformAfterVectorization(VPValue *VPV) const {
2462     auto RepR = dyn_cast_or_null<VPReplicateRecipe>(VPV->getDef());
2463     return !VPV->getDef() || (RepR && RepR->isUniform());
2464   }
2465 
2466   /// Returns the VPRegionBlock of the vector loop.
2467   VPRegionBlock *getVectorLoopRegion() {
2468     return cast<VPRegionBlock>(getEntry());
2469   }
2470 
2471   /// Returns the canonical induction recipe of the vector loop.
2472   VPCanonicalIVPHIRecipe *getCanonicalIV() {
2473     VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
2474     if (EntryVPBB->empty()) {
2475       // VPlan native path.
2476       EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
2477     }
2478     return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
2479   }
2480 
2481 private:
2482   /// Add to the given dominator tree the header block and every new basic block
2483   /// that was created between it and the latch block, inclusive.
2484   static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2485                                   BasicBlock *LoopPreHeaderBB,
2486                                   BasicBlock *LoopExitBB);
2487 };
2488 
2489 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2490 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2491 /// indented and follows the dot format.
2492 class VPlanPrinter {
2493   raw_ostream &OS;
2494   const VPlan &Plan;
2495   unsigned Depth = 0;
2496   unsigned TabWidth = 2;
2497   std::string Indent;
2498   unsigned BID = 0;
2499   SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2500 
2501   VPSlotTracker SlotTracker;
2502 
2503   /// Handle indentation.
2504   void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2505 
2506   /// Print a given \p Block of the Plan.
2507   void dumpBlock(const VPBlockBase *Block);
2508 
2509   /// Print the information related to the CFG edges going out of a given
2510   /// \p Block, followed by printing the successor blocks themselves.
2511   void dumpEdges(const VPBlockBase *Block);
2512 
2513   /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2514   /// its successor blocks.
2515   void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2516 
2517   /// Print a given \p Region of the Plan.
2518   void dumpRegion(const VPRegionBlock *Region);
2519 
2520   unsigned getOrCreateBID(const VPBlockBase *Block) {
2521     return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2522   }
2523 
2524   Twine getOrCreateName(const VPBlockBase *Block);
2525 
2526   Twine getUID(const VPBlockBase *Block);
2527 
2528   /// Print the information related to a CFG edge between two VPBlockBases.
2529   void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2530                 const Twine &Label);
2531 
2532 public:
2533   VPlanPrinter(raw_ostream &O, const VPlan &P)
2534       : OS(O), Plan(P), SlotTracker(&P) {}
2535 
2536   LLVM_DUMP_METHOD void dump();
2537 };
2538 
/// Lightweight wrapper that allows an IR Value to be streamed through the
/// VPlan printing machinery via the operator<< overload below.
struct VPlanIngredient {
  /// The wrapped IR value. Non-owning; must outlive this wrapper.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  /// Print the wrapped value to \p O (defined out of line).
  void print(raw_ostream &O) const;
};
2546 
/// Stream \p I to \p OS by delegating to VPlanIngredient::print.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}
2551 
/// Stream \p Plan to \p OS by delegating to VPlan::print.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2556 #endif
2557 
2558 //===----------------------------------------------------------------------===//
2559 // VPlan Utilities
2560 //===----------------------------------------------------------------------===//
2561 
2562 /// Class that provides utilities for VPBlockBases in VPlan.
2563 class VPBlockUtils {
2564 public:
2565   VPBlockUtils() = delete;
2566 
2567   /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
2568   /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
2569   /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. \p BlockPtr's
2570   /// successors are moved from \p BlockPtr to \p NewBlock and \p BlockPtr's
2571   /// conditional bit is propagated to \p NewBlock. \p NewBlock must have
2572   /// neither successors nor predecessors.
2573   static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
2574     assert(NewBlock->getSuccessors().empty() &&
2575            NewBlock->getPredecessors().empty() &&
2576            "Can't insert new block with predecessors or successors.");
2577     NewBlock->setParent(BlockPtr->getParent());
2578     SmallVector<VPBlockBase *> Succs(BlockPtr->successors());
2579     for (VPBlockBase *Succ : Succs) {
2580       disconnectBlocks(BlockPtr, Succ);
2581       connectBlocks(NewBlock, Succ);
2582     }
2583     NewBlock->setCondBit(BlockPtr->getCondBit());
2584     BlockPtr->setCondBit(nullptr);
2585     connectBlocks(BlockPtr, NewBlock);
2586   }
2587 
2588   /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
2589   /// BlockPtr. Add \p IfTrue and \p IfFalse as succesors of \p BlockPtr and \p
2590   /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
2591   /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
2592   /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
2593   /// must have neither successors nor predecessors.
2594   static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
2595                                    VPValue *Condition, VPBlockBase *BlockPtr) {
2596     assert(IfTrue->getSuccessors().empty() &&
2597            "Can't insert IfTrue with successors.");
2598     assert(IfFalse->getSuccessors().empty() &&
2599            "Can't insert IfFalse with successors.");
2600     BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
2601     IfTrue->setPredecessors({BlockPtr});
2602     IfFalse->setPredecessors({BlockPtr});
2603     IfTrue->setParent(BlockPtr->getParent());
2604     IfFalse->setParent(BlockPtr->getParent());
2605   }
2606 
2607   /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
2608   /// the successors of \p From and \p From to the predecessors of \p To. Both
2609   /// VPBlockBases must have the same parent, which can be null. Both
2610   /// VPBlockBases can be already connected to other VPBlockBases.
2611   static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
2612     assert((From->getParent() == To->getParent()) &&
2613            "Can't connect two block with different parents");
2614     assert(From->getNumSuccessors() < 2 &&
2615            "Blocks can't have more than two successors.");
2616     From->appendSuccessor(To);
2617     To->appendPredecessor(From);
2618   }
2619 
2620   /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
2621   /// from the successors of \p From and \p From from the predecessors of \p To.
2622   static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
2623     assert(To && "Successor to disconnect is null.");
2624     From->removeSuccessor(To);
2625     To->removePredecessor(From);
2626   }
2627 
2628   /// Try to merge \p Block into its single predecessor, if \p Block is a
2629   /// VPBasicBlock and its predecessor has a single successor. Returns a pointer
2630   /// to the predecessor \p Block was merged into or nullptr otherwise.
2631   static VPBasicBlock *tryToMergeBlockIntoPredecessor(VPBlockBase *Block) {
2632     auto *VPBB = dyn_cast<VPBasicBlock>(Block);
2633     auto *PredVPBB =
2634         dyn_cast_or_null<VPBasicBlock>(Block->getSinglePredecessor());
2635     if (!VPBB || !PredVPBB || PredVPBB->getNumSuccessors() != 1)
2636       return nullptr;
2637 
2638     for (VPRecipeBase &R : make_early_inc_range(*VPBB))
2639       R.moveBefore(*PredVPBB, PredVPBB->end());
2640     VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
2641     auto *ParentRegion = cast<VPRegionBlock>(Block->getParent());
2642     if (ParentRegion->getExit() == Block)
2643       ParentRegion->setExit(PredVPBB);
2644     SmallVector<VPBlockBase *> Successors(Block->successors());
2645     for (auto *Succ : Successors) {
2646       VPBlockUtils::disconnectBlocks(Block, Succ);
2647       VPBlockUtils::connectBlocks(PredVPBB, Succ);
2648     }
2649     delete Block;
2650     return PredVPBB;
2651   }
2652 
2653   /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
2654   static bool isBackEdge(const VPBlockBase *FromBlock,
2655                          const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
2656     assert(FromBlock->getParent() == ToBlock->getParent() &&
2657            FromBlock->getParent() && "Must be in same region");
2658     const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
2659     const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
2660     if (!FromLoop || !ToLoop || FromLoop != ToLoop)
2661       return false;
2662 
2663     // A back-edge is a branch from the loop latch to its header.
2664     return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
2665   }
2666 
2667   /// Returns true if \p Block is a loop latch
2668   static bool blockIsLoopLatch(const VPBlockBase *Block,
2669                                const VPLoopInfo *VPLInfo) {
2670     if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
2671       return ParentVPL->isLoopLatch(Block);
2672 
2673     return false;
2674   }
2675 
2676   /// Count and return the number of succesors of \p PredBlock excluding any
2677   /// backedges.
2678   static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
2679                                       VPLoopInfo *VPLI) {
2680     unsigned Count = 0;
2681     for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
2682       if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
2683         Count++;
2684     }
2685     return Count;
2686   }
2687 
2688   /// Return an iterator range over \p Range which only includes \p BlockTy
2689   /// blocks. The accesses are casted to \p BlockTy.
2690   template <typename BlockTy, typename T>
2691   static auto blocksOnly(const T &Range) {
2692     // Create BaseTy with correct const-ness based on BlockTy.
2693     using BaseTy =
2694         typename std::conditional<std::is_const<BlockTy>::value,
2695                                   const VPBlockBase, VPBlockBase>::type;
2696 
2697     // We need to first create an iterator range over (const) BlocktTy & instead
2698     // of (const) BlockTy * for filter_range to work properly.
2699     auto Mapped =
2700         map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
2701     auto Filter = make_filter_range(
2702         Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
2703     return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
2704       return cast<BlockTy>(&Block);
2705     });
2706   }
2707 };
2708 
class VPInterleavedAccessInfo {
  /// Maps each VPInstruction to the interleave group it belongs to. Several
  /// instructions map to the same group, which is why the destructor
  /// deduplicates before deleting.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  /// Delete the owned interleave groups, deduplicating first since multiple
  /// map entries may point at the same group.
  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2747 
2748 /// Class that maps (parts of) an existing VPlan to trees of combined
2749 /// VPInstructions.
class VPlanSlp {
  /// Classifies how a candidate operand matches during bundle construction:
  /// no match (Failed), a consecutive load (Load), or a matching opcode
  /// (Opcode). See getBest().
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    // Sentinel values -1/-2 follow the usual DenseMapInfo convention for
    // empty and tombstone keys; they are never dereferenced.
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  // Non-owning reference to the interleaved access analysis; must outlive
  // this object.
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  /// A multi-node operand: the combined instruction plus its operand bundle.
  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instruction can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
2842 
2843 namespace vputils {
2844 
2845 /// Returns true if only the first lane of \p Def is used.
2846 bool onlyFirstLaneUsed(VPValue *Def);
2847 
2848 } // end namespace vputils
2849 
2850 } // end namespace llvm
2851 
2852 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
2853