//===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains the declarations of the Vectorization Plan base classes:
/// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
///    VPBlockBase, together implementing a Hierarchical CFG;
/// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
///    treated as proper graphs for generic algorithms;
/// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
///    within VPBasicBlocks;
/// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
///    instruction;
/// 5. The VPlan class holding a candidate for vectorization;
/// 6. The VPlanPrinter class providing a way to print a plan in dot format;
/// These are documented in docs/VectorizationPlan.rst.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
#define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H

#include "VPlanLoopInfo.h"
#include "VPlanValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/IRBuilder.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <string>

namespace llvm {

class BasicBlock;
class DominatorTree;
class InnerLoopVectorizer;
class LoopInfo;
class raw_ostream;
class RecurrenceDescriptor;
class Value;
class VPBasicBlock;
class VPRegionBlock;
class VPlan;
class VPlanSlp;

/// A range of powers-of-2 vectorization factors with fixed start and
/// adjustable end. The range includes start and excludes end, e.g.,:
/// [1, 9) = {1, 2, 4, 8}
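///
/// A minimal usage sketch (illustrative only, assuming fixed-width VFs):
/// \code
///   // Covers the fixed VFs {1, 2, 4, 8}.
///   VFRange Range(ElementCount::getFixed(1), ElementCount::getFixed(9));
/// \endcode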
struct VFRange {
  // A power of 2.
  const ElementCount Start;

  // Need not be a power of 2. If End <= Start, the range is empty.
  ElementCount End;

  bool isEmpty() const {
    return End.getKnownMinValue() <= Start.getKnownMinValue();
  }

  VFRange(const ElementCount &Start, const ElementCount &End)
      : Start(Start), End(End) {
    assert(Start.isScalable() == End.isScalable() &&
           "Both Start and End should have the same scalable flag");
    assert(isPowerOf2_32(Start.getKnownMinValue()) &&
           "Expected Start to be a power of 2");
  }
};

using VPlanPtr = std::unique_ptr<VPlan>;

/// In what follows, the term "input IR" refers to code that is fed into the
/// vectorizer whereas the term "output IR" refers to code that is generated by
/// the vectorizer.

/// VPIteration represents a single point in the iteration space of the output
/// (vectorized and/or unrolled) IR loop.
struct VPIteration {
  /// The unroll part, in [0..UF).
  unsigned Part;

  /// The vector lane, in [0..VF).
  unsigned Lane;
};

/// This is a helper struct for maintaining vectorization state. It's used for
/// mapping values from the original loop to their corresponding values in
/// the new loop. Two mappings are maintained: one for vectorized values and
/// one for scalarized values. Vectorized values are represented with UF
/// vector values in the new loop, and scalarized values are represented with
/// UF x VF scalar values in the new loop. UF and VF are the unroll and
/// vectorization factors, respectively.
///
/// Entries can be added to either map with setVectorValue and setScalarValue,
/// which assert that an entry was not already added before. If an entry is to
/// replace an existing one, call resetVectorValue and resetScalarValue. This is
/// currently needed to modify the mapped values during "fix-up" operations that
/// occur once the first phase of widening is complete. These operations include
/// type truncation and the second phase of recurrence widening.
///
/// Entries from either map can be retrieved using the getVectorValue and
/// getScalarValue functions, which assert that the desired value exists.
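///
/// A minimal usage sketch (illustrative only; Key, Widened and Scalar stand
/// for values produced elsewhere):
/// \code
///   VectorizerValueMap Map(/*UF=*/2, ElementCount::getFixed(4));
///   Map.setVectorValue(Key, /*Part=*/0, Widened);
///   if (Map.hasVectorValue(Key, /*Part=*/0))
///     Value *V = Map.getVectorValue(Key, /*Part=*/0);
///   Map.setScalarValue(Key, {/*Part=*/1, /*Lane=*/3}, Scalar);
/// \endcode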
struct VectorizerValueMap {
  friend struct VPTransformState;

private:
  /// The unroll factor. Each entry in the vector map contains UF vector values.
  unsigned UF;

  /// The vectorization factor. Each entry in the scalar map contains UF x VF
  /// scalar values.
  ElementCount VF;

  /// The vector and scalar map storage. We use std::map and not DenseMap
  /// because insertions to DenseMap invalidate its iterators.
  using VectorParts = SmallVector<Value *, 2>;
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
  std::map<Value *, VectorParts> VectorMapStorage;
  std::map<Value *, ScalarParts> ScalarMapStorage;

public:
  /// Construct an empty map with the given unroll and vectorization factors.
  VectorizerValueMap(unsigned UF, ElementCount VF) : UF(UF), VF(VF) {}

  /// \return True if the map has any vector entry for \p Key.
  bool hasAnyVectorValue(Value *Key) const {
    return VectorMapStorage.count(Key);
  }

  /// \return True if the map has a vector entry for \p Key and \p Part.
  bool hasVectorValue(Value *Key, unsigned Part) const {
    assert(Part < UF && "Queried Vector Part is too large.");
    if (!hasAnyVectorValue(Key))
      return false;
    const VectorParts &Entry = VectorMapStorage.find(Key)->second;
    assert(Entry.size() == UF && "VectorParts has wrong dimensions.");
    return Entry[Part] != nullptr;
  }

  /// \return True if the map has any scalar entry for \p Key.
  bool hasAnyScalarValue(Value *Key) const {
    return ScalarMapStorage.count(Key);
  }

  /// \return True if the map has a scalar entry for \p Key and \p Instance.
  bool hasScalarValue(Value *Key, const VPIteration &Instance) const {
    assert(Instance.Part < UF && "Queried Scalar Part is too large.");
    assert(Instance.Lane < VF.getKnownMinValue() &&
           "Queried Scalar Lane is too large.");

    if (!hasAnyScalarValue(Key))
      return false;
    const ScalarParts &Entry = ScalarMapStorage.find(Key)->second;
    assert(Entry.size() == UF && "ScalarParts has wrong dimensions.");
    assert(Entry[Instance.Part].size() == VF.getKnownMinValue() &&
           "ScalarParts has wrong dimensions.");
    return Entry[Instance.Part][Instance.Lane] != nullptr;
  }

  /// Retrieve the existing vector value that corresponds to \p Key and
  /// \p Part.
  Value *getVectorValue(Value *Key, unsigned Part) {
    assert(hasVectorValue(Key, Part) && "Getting non-existent value.");
    return VectorMapStorage[Key][Part];
  }

  /// Retrieve the existing scalar value that corresponds to \p Key and
  /// \p Instance.
  Value *getScalarValue(Value *Key, const VPIteration &Instance) {
    assert(hasScalarValue(Key, Instance) && "Getting non-existent value.");
    return ScalarMapStorage[Key][Instance.Part][Instance.Lane];
  }

  /// Set a vector value associated with \p Key and \p Part. Assumes such a
  /// value is not already set. If it is, use resetVectorValue() instead.
  void setVectorValue(Value *Key, unsigned Part, Value *Vector) {
    assert(!hasVectorValue(Key, Part) && "Vector value already set for part");
    if (!VectorMapStorage.count(Key)) {
      VectorParts Entry(UF);
      VectorMapStorage[Key] = Entry;
    }
    VectorMapStorage[Key][Part] = Vector;
  }

  /// Set a scalar value associated with \p Key and \p Instance. Assumes such a
  /// value is not already set.
  void setScalarValue(Value *Key, const VPIteration &Instance, Value *Scalar) {
    assert(!hasScalarValue(Key, Instance) && "Scalar value already set");
    if (!ScalarMapStorage.count(Key)) {
      ScalarParts Entry(UF);
      // TODO: Consider storing uniform values only per-part, as they occupy
      //       lane 0 only, keeping the other VF-1 redundant entries null.
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part].resize(VF.getKnownMinValue(), nullptr);
      ScalarMapStorage[Key] = Entry;
    }
    ScalarMapStorage[Key][Instance.Part][Instance.Lane] = Scalar;
  }

  /// Reset the vector value associated with \p Key for the given \p Part.
  /// This function can be used to update values that have already been
  /// vectorized. This is the case for "fix-up" operations including type
  /// truncation and the second phase of recurrence vectorization.
  void resetVectorValue(Value *Key, unsigned Part, Value *Vector) {
    assert(hasVectorValue(Key, Part) && "Vector value not set for part");
    VectorMapStorage[Key][Part] = Vector;
  }

  /// Reset the scalar value associated with \p Key for the part and lane given
  /// by \p Instance.
  /// This function can be used to update values that have already been
  /// scalarized. This is the case for "fix-up" operations including scalar phi
  /// nodes for scalarized and predicated instructions.
  void resetScalarValue(Value *Key, const VPIteration &Instance,
                        Value *Scalar) {
    assert(hasScalarValue(Key, Instance) &&
           "Scalar value not set for part and lane");
    ScalarMapStorage[Key][Instance.Part][Instance.Lane] = Scalar;
  }
};

/// This class is used to enable the VPlan to invoke a method of ILV. This is
/// needed until the method is refactored out of ILV and becomes reusable.
struct VPCallback {
  virtual ~VPCallback() {}
  virtual Value *getOrCreateVectorValues(Value *V, unsigned Part) = 0;
  virtual Value *getOrCreateScalarValue(Value *V,
                                        const VPIteration &Instance) = 0;
};

/// VPTransformState holds information passed down when "executing" a VPlan,
/// needed for generating the output IR.
struct VPTransformState {
  VPTransformState(ElementCount VF, unsigned UF, Loop *OrigLoop, LoopInfo *LI,
                   DominatorTree *DT, IRBuilder<> &Builder,
                   VectorizerValueMap &ValueMap, InnerLoopVectorizer *ILV,
                   VPCallback &Callback)
      : VF(VF), UF(UF), Instance(), OrigLoop(OrigLoop), LI(LI), DT(DT),
        Builder(Builder), ValueMap(ValueMap), ILV(ILV), Callback(Callback) {}

  /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
  ElementCount VF;
  unsigned UF;

  /// Hold the indices to generate specific scalar instructions. When not set,
  /// all instances are to be generated, using either scalar or vector
  /// instructions.
  Optional<VPIteration> Instance;

  struct DataState {
    /// A type for vectorized values in the new loop. Each value from the
    /// original loop, when vectorized, is represented by UF vector values in
    /// the new unrolled loop, where UF is the unroll factor.
    using PerPartValuesTy = SmallVector<Value *, 2>;

    DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;

    using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
    DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
  } Data;

  /// Get the generated Value for a given VPValue and a given Part. Note that
  /// as some Defs are still created by ILV and managed in its ValueMap, this
  /// method will delegate the call to ILV in such cases in order to provide
  /// callers a consistent API.
  /// \see set.
  Value *get(VPValue *Def, unsigned Part) {
    // If Values have been set for this Def, return the one relevant for
    // \p Part.
    if (Data.PerPartOutput.count(Def))
      return Data.PerPartOutput[Def][Part];
    // Def is managed by ILV: bring the Values from ValueMap.
    return Callback.getOrCreateVectorValues(VPValue2Value[Def], Part);
  }

  /// Get the generated Value for a given VPValue and given Part and Lane.
  Value *get(VPValue *Def, const VPIteration &Instance);

  bool hasVectorValue(VPValue *Def, unsigned Part) {
    auto I = Data.PerPartOutput.find(Def);
    return I != Data.PerPartOutput.end() && Part < I->second.size() &&
           I->second[Part];
  }

  bool hasScalarValue(VPValue *Def, VPIteration Instance) {
    auto I = Data.PerPartScalars.find(Def);
    if (I == Data.PerPartScalars.end())
      return false;
    return Instance.Part < I->second.size() &&
           Instance.Lane < I->second[Instance.Part].size() &&
           I->second[Instance.Part][Instance.Lane];
  }

  /// Set the generated Value for a given VPValue and a given Part.
  void set(VPValue *Def, Value *V, unsigned Part) {
    if (!Data.PerPartOutput.count(Def)) {
      DataState::PerPartValuesTy Entry(UF);
      Data.PerPartOutput[Def] = Entry;
    }
    Data.PerPartOutput[Def][Part] = V;
  }
  void set(VPValue *Def, Value *IRDef, Value *V, unsigned Part);

  void set(VPValue *Def, Value *V, const VPIteration &Instance) {
    auto Iter = Data.PerPartScalars.insert({Def, {}});
    auto &PerPartVec = Iter.first->second;
    while (PerPartVec.size() <= Instance.Part)
      PerPartVec.emplace_back();
    auto &Scalars = PerPartVec[Instance.Part];
    while (Scalars.size() <= Instance.Lane)
      Scalars.push_back(nullptr);
    Scalars[Instance.Lane] = V;
  }
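
  // Illustrative sketch (not part of the API): a recipe's execute()
  // implementation typically produces one output Value per unrolled part and
  // records it here, e.g. assuming VPValues A and B as inputs and Def as the
  // recipe's result:
  //   for (unsigned Part = 0; Part < State.UF; ++Part) {
  //     Value *Sum = State.Builder.CreateAdd(State.get(A, Part),
  //                                          State.get(B, Part));
  //     State.set(Def, Sum, Part);
  //   }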

  /// Hold state information used when constructing the CFG of the output IR,
  /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
  struct CFGState {
    /// The previous VPBasicBlock visited. Initially set to null.
    VPBasicBlock *PrevVPBB = nullptr;

    /// The previous IR BasicBlock created or used. Initially set to the new
    /// header BasicBlock.
    BasicBlock *PrevBB = nullptr;

    /// The last IR BasicBlock in the output IR. Set to the new latch
    /// BasicBlock, used for placing the newly created BasicBlocks.
    BasicBlock *LastBB = nullptr;

    /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
    /// of replication, maps the BasicBlock of the last replica created.
    SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;

    /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
    /// up at the end of vector code generation.
    SmallVector<VPBasicBlock *, 8> VPBBsToFix;

    CFGState() = default;
  } CFG;

  /// Hold a pointer to the original loop.
  Loop *OrigLoop;

  /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
  LoopInfo *LI;

  /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
  DominatorTree *DT;

  /// Hold a reference to the IRBuilder used to generate output IR code.
  IRBuilder<> &Builder;

  /// Hold a reference to the Value state information used when generating the
  /// Values of the output IR.
  VectorizerValueMap &ValueMap;

  /// Hold a reference to a mapping between VPValues in VPlan and original
  /// Values they correspond to.
  VPValue2ValueTy VPValue2Value;

  /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
  Value *CanonicalIV = nullptr;

  /// Hold the trip count of the scalar loop.
  Value *TripCount = nullptr;

  /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
  InnerLoopVectorizer *ILV;

  VPCallback &Callback;
};

/// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
/// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
class VPBlockBase {
  friend class VPBlockUtils;

  const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).

  /// An optional name for the block.
  std::string Name;

  /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
  /// it is a topmost VPBlockBase.
  VPRegionBlock *Parent = nullptr;

  /// List of predecessor blocks.
  SmallVector<VPBlockBase *, 1> Predecessors;

  /// List of successor blocks.
  SmallVector<VPBlockBase *, 1> Successors;

  /// Successor selector, null for zero or single successor blocks.
  VPValue *CondBit = nullptr;

  /// Current block predicate - null if the block does not need a predicate.
  VPValue *Predicate = nullptr;

  /// VPlan containing the block. Can only be set on the entry block of the
  /// plan.
  VPlan *Plan = nullptr;

  /// Add \p Successor as the last successor to this block.
  void appendSuccessor(VPBlockBase *Successor) {
    assert(Successor && "Cannot add nullptr successor!");
    Successors.push_back(Successor);
  }

  /// Add \p Predecessor as the last predecessor to this block.
  void appendPredecessor(VPBlockBase *Predecessor) {
    assert(Predecessor && "Cannot add nullptr predecessor!");
    Predecessors.push_back(Predecessor);
  }

  /// Remove \p Predecessor from the predecessors of this block.
  void removePredecessor(VPBlockBase *Predecessor) {
    auto Pos = find(Predecessors, Predecessor);
    assert(Pos && "Predecessor does not exist");
    Predecessors.erase(Pos);
  }

  /// Remove \p Successor from the successors of this block.
  void removeSuccessor(VPBlockBase *Successor) {
    auto Pos = find(Successors, Successor);
    assert(Pos && "Successor does not exist");
    Successors.erase(Pos);
  }

protected:
  VPBlockBase(const unsigned char SC, const std::string &N)
      : SubclassID(SC), Name(N) {}

public:
  /// An enumeration for keeping track of the concrete subclasses of
  /// VPBlockBase that are actually instantiated. Values of this enumeration
  /// are kept in the SubclassID field of the VPBlockBase objects. They are
  /// used for concrete type identification.
  using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };

  using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;

  virtual ~VPBlockBase() = default;

  const std::string &getName() const { return Name; }

  void setName(const Twine &newName) { Name = newName.str(); }

  /// \return an ID for the concrete type of this object.
  /// This is used to implement the classof checks. This should not be used
  /// for any other purpose, as the values may change as LLVM evolves.
  unsigned getVPBlockID() const { return SubclassID; }

  VPRegionBlock *getParent() { return Parent; }
  const VPRegionBlock *getParent() const { return Parent; }

  /// \return A pointer to the plan containing the current block.
  VPlan *getPlan();
  const VPlan *getPlan() const;

  /// Sets the pointer of the plan containing the block. The block must be the
  /// entry block into the VPlan.
  void setPlan(VPlan *ParentPlan);

  void setParent(VPRegionBlock *P) { Parent = P; }

  /// \return the VPBasicBlock that is the entry of this VPBlockBase,
  /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
  /// VPBlockBase is a VPBasicBlock, it is returned.
  const VPBasicBlock *getEntryBasicBlock() const;
  VPBasicBlock *getEntryBasicBlock();

  /// \return the VPBasicBlock that is the exit of this VPBlockBase,
  /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
  /// VPBlockBase is a VPBasicBlock, it is returned.
  const VPBasicBlock *getExitBasicBlock() const;
  VPBasicBlock *getExitBasicBlock();

  const VPBlocksTy &getSuccessors() const { return Successors; }
  VPBlocksTy &getSuccessors() { return Successors; }

  const VPBlocksTy &getPredecessors() const { return Predecessors; }
  VPBlocksTy &getPredecessors() { return Predecessors; }

  /// \return the successor of this VPBlockBase if it has a single successor.
  /// Otherwise return a null pointer.
  VPBlockBase *getSingleSuccessor() const {
    return (Successors.size() == 1 ? *Successors.begin() : nullptr);
  }

  /// \return the predecessor of this VPBlockBase if it has a single
  /// predecessor. Otherwise return a null pointer.
  VPBlockBase *getSinglePredecessor() const {
    return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
  }

  size_t getNumSuccessors() const { return Successors.size(); }
  size_t getNumPredecessors() const { return Predecessors.size(); }

  /// An Enclosing Block of a block B is any block containing B, including B
  /// itself. \return the closest enclosing block starting from "this", which
  /// has successors. \return the root enclosing block if all enclosing blocks
  /// have no successors.
  VPBlockBase *getEnclosingBlockWithSuccessors();

  /// \return the closest enclosing block starting from "this", which has
  /// predecessors. \return the root enclosing block if all enclosing blocks
  /// have no predecessors.
  VPBlockBase *getEnclosingBlockWithPredecessors();

  /// \return the successors either attached directly to this VPBlockBase or, if
  /// this VPBlockBase is the exit block of a VPRegionBlock and has no
  /// successors of its own, search recursively for the first enclosing
  /// VPRegionBlock that has successors and return them. If no such
  /// VPRegionBlock exists, return the (empty) successors of the topmost
  /// VPBlockBase reached.
  const VPBlocksTy &getHierarchicalSuccessors() {
    return getEnclosingBlockWithSuccessors()->getSuccessors();
  }

  /// \return the hierarchical successor of this VPBlockBase if it has a single
  /// hierarchical successor. Otherwise return a null pointer.
  VPBlockBase *getSingleHierarchicalSuccessor() {
    return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
  }

  /// \return the predecessors either attached directly to this VPBlockBase or,
  /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
  /// predecessors of its own, search recursively for the first enclosing
  /// VPRegionBlock that has predecessors and return them. If no such
  /// VPRegionBlock exists, return the (empty) predecessors of the topmost
  /// VPBlockBase reached.
  const VPBlocksTy &getHierarchicalPredecessors() {
    return getEnclosingBlockWithPredecessors()->getPredecessors();
  }

  /// \return the hierarchical predecessor of this VPBlockBase if it has a
  /// single hierarchical predecessor. Otherwise return a null pointer.
  VPBlockBase *getSingleHierarchicalPredecessor() {
    return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
  }

  /// \return the condition bit selecting the successor.
  VPValue *getCondBit() { return CondBit; }

  const VPValue *getCondBit() const { return CondBit; }

  void setCondBit(VPValue *CV) { CondBit = CV; }

  VPValue *getPredicate() { return Predicate; }

  const VPValue *getPredicate() const { return Predicate; }

  void setPredicate(VPValue *Pred) { Predicate = Pred; }

  /// Set a given VPBlockBase \p Successor as the single successor of this
  /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
  /// This VPBlockBase must have no successors.
  void setOneSuccessor(VPBlockBase *Successor) {
    assert(Successors.empty() && "Setting one successor when others exist.");
    appendSuccessor(Successor);
  }

  /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
  /// successors of this VPBlockBase. \p Condition is set as the successor
  /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
  /// IfFalse. This VPBlockBase must have no successors.
  void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
                        VPValue *Condition) {
    assert(Successors.empty() && "Setting two successors when others exist.");
    assert(Condition && "Setting two successors without condition!");
    CondBit = Condition;
    appendSuccessor(IfTrue);
    appendSuccessor(IfFalse);
  }

  /// Set each VPBlockBase in \p NewPreds as predecessor of this VPBlockBase.
  /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
  /// as successor of any VPBlockBase in \p NewPreds.
  void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
    assert(Predecessors.empty() && "Block predecessors already set.");
    for (auto *Pred : NewPreds)
      appendPredecessor(Pred);
  }
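
  // Illustrative sketch: successor and predecessor edges are stored
  // separately, so wiring blocks by hand must update both directions (Cond,
  // Then, Else and CondBit are hypothetical placeholders):
  //   Cond->setTwoSuccessors(Then, Else, CondBit);
  //   Then->setPredecessors({Cond});
  //   Else->setPredecessors({Cond});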

  /// Remove all the predecessors of this block.
  void clearPredecessors() { Predecessors.clear(); }

  /// Remove all the successors of this block, and set its condition bit to
  /// null.
  void clearSuccessors() {
    Successors.clear();
    CondBit = nullptr;
  }

  /// The method which generates the output IR that corresponds to this
  /// VPBlockBase, thereby "executing" the VPlan.
  virtual void execute(struct VPTransformState *State) = 0;

  /// Delete all blocks reachable from a given VPBlockBase, inclusive.
  static void deleteCFG(VPBlockBase *Entry);

  void printAsOperand(raw_ostream &OS, bool PrintType) const {
    OS << getName();
  }

  void print(raw_ostream &OS) const {
    // TODO: Only printing VPBB name for now since we only have dot printing
    // support for VPInstructions/Recipes.
    printAsOperand(OS, false);
  }

  /// Return true if it is legal to hoist instructions into this block.
  bool isLegalToHoistInto() {
    // There are currently no constraints that prevent an instruction from
    // being hoisted into a VPBlockBase.
    return true;
  }

  /// Replace all operands of VPUsers in the block with \p NewValue and also
  /// replace all uses of VPValues defined in the block with \p NewValue.
  virtual void dropAllReferences(VPValue *NewValue) = 0;
};

/// VPRecipeBase is a base class modeling a sequence of one or more output IR
/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
                     public VPDef {
  friend VPBasicBlock;
  friend class VPBlockUtils;

  /// Each VPRecipe belongs to a single VPBasicBlock.
  VPBasicBlock *Parent = nullptr;

public:
  VPRecipeBase(const unsigned char SC) : VPDef(SC) {}
  virtual ~VPRecipeBase() = default;

  /// \return the VPBasicBlock which this VPRecipe belongs to.
  VPBasicBlock *getParent() { return Parent; }
  const VPBasicBlock *getParent() const { return Parent; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRecipe, thereby "executing" the VPlan.
  virtual void execute(struct VPTransformState &State) = 0;

  /// Insert an unlinked recipe into a basic block immediately before
  /// the specified recipe.
  void insertBefore(VPRecipeBase *InsertPos);

  /// Insert an unlinked Recipe into a basic block immediately after
  /// the specified Recipe.
  void insertAfter(VPRecipeBase *InsertPos);

  /// Unlink this recipe from its current VPBasicBlock and insert it into
  /// the VPBasicBlock that MovePos lives in, right after MovePos.
  void moveAfter(VPRecipeBase *MovePos);

  /// Unlink this recipe and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  iplist<VPRecipeBase>::iterator eraseFromParent();

  /// Returns a pointer to a VPUser if the recipe inherits from VPUser, or
  /// nullptr otherwise.
  VPUser *toVPUser();

  /// Returns the underlying instruction if the recipe is a VPValue, or nullptr
  /// otherwise.
  Instruction *getUnderlyingInstr() {
    return cast<Instruction>(getVPValue()->getUnderlyingValue());
  }
  const Instruction *getUnderlyingInstr() const {
    return cast<Instruction>(getVPValue()->getUnderlyingValue());
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    // All VPDefs are also VPRecipeBases.
    return true;
  }
};

inline bool VPUser::classof(const VPDef *Def) {
  return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
         Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
         Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
         Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
         Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
         Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
         Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
         Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
         Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
         Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
         Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
}

/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While, as with any Recipe, it may generate a sequence of IR instructions
/// when executed, these instructions always form a single-def expression, as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPUser, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
  enum {
    Not = Instruction::OtherOpsEnd + 1,
    ICmpULE,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
  };

private:
  using OpcodeTy = unsigned char;
  OpcodeTy Opcode;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction.
  void generateInstruction(VPTransformState &State, unsigned Part);

protected:
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC), VPUser(Operands),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {}

  VPInstruction(unsigned Opcode, ArrayRef<VPInstruction *> Operands)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC), VPUser({}),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {
    for (auto *I : Operands)
      addOperand(I->getVPValue());
  }

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands)
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands)) {}
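
  // Illustrative sketch: creating a new VPlan-level addition over two existing
  // VPValues A and B (hypothetical placeholders):
  //   auto *Add = new VPInstruction(Instruction::Add, {A, B});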

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVInstructionSC;
  }

  VPInstruction *clone() const {
    SmallVector<VPValue *, 2> Operands(operands());
    return new VPInstruction(Opcode, Operands);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  void dump() const;

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    //       modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
      return false;
    default:
      return true;
    }
  }
};

/// VPWidenRecipe is a recipe for producing a widened (vector type) copy of its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue, public VPUser {
public:
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC),
        VPValue(VPValue::VPVWidenSC, &I, this), VPUser(Operands) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPUser, public VPValue {

public:
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
      : VPRecipeBase(VPRecipeBase::VPWidenCallSC), VPUser(CallArguments),
        VPValue(VPValue::VPVWidenCallSC, &I, this) {}

  ~VPWidenCallRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
  }

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPUser, public VPValue {

  /// Is the condition of the select loop invariant?
  bool InvariantCond;

public:
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
                      bool InvariantCond)
      : VPRecipeBase(VPRecipeBase::VPWidenSelectSC), VPUser(Operands),
        VPValue(VPValue::VPVWidenSelectSC, &I, this),
        InvariantCond(InvariantCond) {}

  ~VPWidenSelectRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
  }

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// A recipe for handling GEP instructions.
class VPWidenGEPRecipe : public VPRecipeBase,
                         public VPUser,
                         public VPValue {
  bool IsPtrLoopInvariant;
  SmallBitVector IsIndexLoopInvariant;

public:
  template <typename IterT>
  VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenGEPSC), VPUser(Operands),
        VPValue(VPValue::VPVWidenGEPSC, GEP, this),
        IsIndexLoopInvariant(GEP->getNumIndices(), false) {}

  template <typename IterT>
  VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
                   Loop *OrigLoop)
      : VPRecipeBase(VPRecipeBase::VPWidenGEPSC), VPUser(Operands),
        VPValue(VPValue::VPVWidenGEPSC, GEP, this),
        IsIndexLoopInvariant(GEP->getNumIndices(), false) {
    IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
    for (auto Index : enumerate(GEP->indices()))
      IsIndexLoopInvariant[Index.index()] =
          OrigLoop->isLoopInvariant(Index.value().get());
  }
  ~VPWidenGEPRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
  }

  /// Generate the gep nodes.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase, public VPUser {
  PHINode *IV;
  TruncInst *Trunc;

public:
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start,
                                TruncInst *Trunc = nullptr)
      : VPRecipeBase(VPWidenIntOrFpInductionSC), VPUser({Start}), IV(IV),
        Trunc(Trunc) {
    if (Trunc)
      new VPValue(Trunc, this);
    else
      new VPValue(IV, this);
  }
  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }
};

/// A recipe for handling all phi nodes except for integer and FP inductions.
/// For reduction PHIs, RdxDesc must point to the corresponding recurrence
/// descriptor and the start value is the first operand of the recipe.
class VPWidenPHIRecipe : public VPRecipeBase, public VPUser {
  PHINode *Phi;

  /// Descriptor for a reduction PHI.
  RecurrenceDescriptor *RdxDesc = nullptr;

public:
  /// Create a new VPWidenPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPWidenPHIRecipe(PHINode *Phi, RecurrenceDescriptor &RdxDesc, VPValue &Start)
      : VPWidenPHIRecipe(Phi) {
    this->RdxDesc = &RdxDesc;
    addOperand(&Start);
  }

  /// Create a VPWidenPHIRecipe for \p Phi
  VPWidenPHIRecipe(PHINode *Phi) : VPRecipeBase(VPWidenPHISC), Phi(Phi) {
    new VPValue(Phi, this);
  }
  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Returns the start value of the phi, if it is a reduction.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }
};

/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPUser {
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC), VPUser(Operands), Phi(Phi) {
    new VPValue(Phi, this);
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx.
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx.
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }
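
  // Example: with operands [I0, M0, I1, M1], getNumIncomingValues() is 2,
  // getIncomingValue(1) returns I1 and getMask(1) returns M1. With the single
  // operand [I0], the sole incoming value has no mask.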

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// VPInterleaveRecipe is a recipe for transforming an interleave group of
/// loads or stores into one wide load/store and shuffles. The first operand of
/// a VPInterleaveRecipe is the address, followed by the stored values,
/// followed by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase, public VPUser {
  const InterleaveGroup<Instruction> *IG;

  bool HasMask = false;

public:
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC), VPUser(Addr), IG(IG) {
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumOperands() - (HasMask ? 2 : 1));
  }
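
  // Example: a masked store group with operands [Addr, V0, V1, Mask] yields
  // getAddr() == Addr, getStoredValues() == {V0, V1} and getMask() == Mask;
  // a load group carries no stored values.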

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
};

/// A recipe to represent in-loop reduction operations, performing a reduction
/// on a vector operand into a scalar value and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPUser, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  RecurrenceDescriptor *RdxDesc;
  /// Fast math flags to use for the resulting reduction operation.
  bool NoNaN;
  /// Pointer to the TTI, needed to create the target reduction.
  const TargetTransformInfo *TTI;

public:
  VPReductionRecipe(RecurrenceDescriptor *R, Instruction *I, VPValue *ChainOp,
                    VPValue *VecOp, VPValue *CondOp, bool NoNaN,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC), VPUser({ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), NoNaN(NoNaN),
        TTI(TTI) {
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReductionSC;
  }

  /// Generate the reduction in the loop.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};

/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to
/// be uniform, only one copy, for lane zero, is generated.
class VPReplicateRecipe : public VPRecipeBase, public VPUser, public VPValue {
  /// Indicator if only a single replica per lane is needed.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  template <typename IterT>
  VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
                    bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC), VPUser(Operands),
        VPValue(VPVReplicateSC, I, this), IsUniform(IsUniform),
        IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
  }

  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  bool isUniform() const { return IsUniform; }
};

/// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase, public VPUser {
public:
  VPBranchOnMaskRecipe(VPValue *BlockInMask) : VPRecipeBase(VPBranchOnMaskSC) {
    if (BlockInMask) // nullptr means all-one mask.
      addOperand(BlockInMask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
  }

  /// Generate the extraction of the appropriate bit from the block mask and the
  /// conditional branch.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override {
    O << " +\n" << Indent << "\"BRANCH-ON-MASK ";
    if (VPValue *Mask = getMask())
      Mask->printAsOperand(O, SlotTracker);
    else
      O << " All-One";
    O << "\\l\"";
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
    // Mask is optional.
    return getNumOperands() == 1 ? getOperand(0) : nullptr;
  }
};

/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPUser {

public:
  /// Construct a VPPredInstPHIRecipe given \p PredV, whose value needs a phi
  /// node after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC), VPUser(PredV) {
    new VPValue(PredV->getUnderlyingValue(), this);
  }
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase,
                                       public VPUser {
  Instruction &Ingredient;

  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }
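
  // Operand layout examples: an unmasked load has operands [Addr], a masked
  // load [Addr, Mask], an unmasked store [Addr, StoredValue] and a masked
  // store [Addr, StoredValue, Mask]; isMasked() keys off these counts.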

public:
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC), VPUser({Addr}),
        Ingredient(Load) {
    new VPValue(VPValue::VPVMemoryInstructionSC, &Load, this);
    setMask(Mask);
  }

  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask)
      : VPRecipeBase(VPWidenMemoryInstructionSC), VPUser({Addr, StoredValue}),
        Ingredient(Store) {
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase {
public:
  VPWidenCanonicalIVRecipe() : VPRecipeBase(VPWidenCanonicalIVSC) {
    new VPValue(nullptr, this);
  }

  ~VPWidenCanonicalIVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Generate a canonical vector induction variable of the vector loop, with
  /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
  /// step = <VF*UF, VF*UF, ..., VF*UF>.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
};

/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipes, each representing a sequence of
/// output IR instructions.
class VPBasicBlock : public VPBlockBase {
public:
  using RecipeListTy = iplist<VPRecipeBase>;

private:
  /// The VPRecipes held in the order of output instructions to generate.
  RecipeListTy Recipes;

public:
  VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
      : VPBlockBase(VPBasicBlockSC, Name.str()) {
    if (Recipe)
      appendRecipe(Recipe);
  }

  ~VPBasicBlock() override { Recipes.clear(); }

  /// Recipe iterators...
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }
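
  // Illustrative sketch: recipes are kept in the order in which their output
  // IR should be generated (Recipe is a hypothetical, already created recipe):
  //   VPBasicBlock *VPBB = new VPBasicBlock("body");
  //   VPBB->appendRecipe(Recipe);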

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  void dropAllReferences(VPValue *NewValue) override;

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};

/// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
/// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
/// A VPRegionBlock may indicate that its contents are to be replicated several
/// times. This is designed to support predicated scalarization, in which a
/// scalar if-then code structure needs to be generated VF * UF times. Having
/// this replication indicator helps to keep a single model for multiple
/// candidate VF's. The actual replication takes place only once the desired VF
/// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  ~VPRegionBlock() override {
    if (Entry) {
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  void dropAllReferences(VPValue *NewValue) override;
};

//===----------------------------------------------------------------------===//
// GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs     //
//===----------------------------------------------------------------------===//

// The following set of template specializations implement GraphTraits to treat
// any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
// VPBlockBase is a VPRegionBlock, this specialization provides access to its
// successors/predecessors but not to the blocks inside the region.
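//
// For example, these traits let generic iterators such as depth_first walk
// the blocks reachable from a given block without descending into nested
// regions (a non-normative sketch; Block is a hypothetical VPBlockBase
// pointer):
//
//   for (VPBlockBase *VPB : depth_first(Block))
//     dbgs() << VPB->getName() << "\n";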

template <> struct GraphTraits<VPBlockBase *> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};

template <> struct GraphTraits<const VPBlockBase *> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};

// Inverse order specialization for VPBlockBases. Predecessors are used instead
// of successors for the inverse traversal.
template <> struct GraphTraits<Inverse<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }

  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getPredecessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getPredecessors().end();
  }
};

// The following set of template specializations implement GraphTraits to
// treat VPRegionBlock as a graph and recurse inside its nodes. It's important
// to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
// (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
// there won't be automatic recursion into other VPBlockBases that turn out to
// be VPRegionBlocks.
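//
// For example, the blocks directly contained in a region can be visited with
// the generic nodes() helper from llvm/ADT/GraphTraits.h (a non-normative
// sketch; Region is a hypothetical VPRegionBlock pointer):
//
//   for (VPBlockBase *VPB : nodes(Region))
//     dbgs() << VPB->getName() << "\n";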

template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};

template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};

template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};

/// VPlan models a candidate for vectorization, encoding various decisions taken
/// to produce efficient output IR, including which branches, basic-blocks and
/// output IR instructions to generate, and their cost. VPlan holds a
/// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
/// VPBlock.
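///
/// For illustration, a minimal plan can be assembled as follows (a
/// non-normative sketch; the block name "entry" and the VF are arbitrary):
/// \code
///   VPBasicBlock *VPBB = new VPBasicBlock("entry");
///   VPlanPtr Plan = std::make_unique<VPlan>(VPBB);
///   Plan->addVF(ElementCount::getFixed(4));
/// \endcode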
class VPlan {
  friend class VPlanPrinter;
  friend class VPSlotTracker;

  /// Hold the single entry to the Hierarchical CFG of the VPlan.
  VPBlockBase *Entry;

  /// Holds the VFs applicable to this VPlan.
  SmallSetVector<ElementCount, 2> VFs;

  /// Holds the name of the VPlan, for printing.
  std::string Name;

  /// Holds all the external definitions created for this VPlan.
  // TODO: Introduce a specific representation for external definitions in
  // VPlan. External definitions must be immutable and hold a pointer to their
  // underlying IR that will be used to implement their structural comparison
  // (operators '==' and '<').
  SmallPtrSet<VPValue *, 16> VPExternalDefs;

  /// Represents the backedge taken count of the original loop, for folding
  /// the tail.
  VPValue *BackedgeTakenCount = nullptr;

  /// Holds a mapping between Values and their corresponding VPValue inside
  /// VPlan.
  Value2VPValueTy Value2VPValue;

  /// Contains all VPValues that have been allocated by addVPValue directly and
  /// need to be freed when the plan's destructor is called.
  SmallVector<VPValue *, 16> VPValuesToFree;

  /// Holds the VPLoopInfo analysis for this VPlan.
  VPLoopInfo VPLInfo;

  /// Holds the condition bit values built during VPInstruction to VPRecipe
  /// transformation.
  SmallVector<VPValue *, 4> VPCBVs;

public:
  VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
    if (Entry)
      Entry->setPlan(this);
  }

  ~VPlan() {
    if (Entry) {
      VPValue DummyValue;
      for (VPBlockBase *Block : depth_first(Entry))
        Block->dropAllReferences(&DummyValue);

      VPBlockBase::deleteCFG(Entry);
    }
    for (VPValue *VPV : VPValuesToFree)
      delete VPV;
    if (BackedgeTakenCount)
      delete BackedgeTakenCount;
    for (VPValue *Def : VPExternalDefs)
      delete Def;
    for (VPValue *CBV : VPCBVs)
      delete CBV;
  }

  /// Generate the IR code for this VPlan.
  void execute(struct VPTransformState *State);

  VPBlockBase *getEntry() { return Entry; }
  const VPBlockBase *getEntry() const { return Entry; }

  VPBlockBase *setEntry(VPBlockBase *Block) {
    Entry = Block;
    Block->setPlan(this);
    return Entry;
  }

  /// The backedge taken count of the original loop.
  VPValue *getOrCreateBackedgeTakenCount() {
    if (!BackedgeTakenCount)
      BackedgeTakenCount = new VPValue();
    return BackedgeTakenCount;
  }

  void addVF(ElementCount VF) { VFs.insert(VF); }

  bool hasVF(ElementCount VF) { return VFs.count(VF); }

  const std::string &getName() const { return Name; }

  void setName(const Twine &newName) { Name = newName.str(); }

  /// Add \p VPVal to the pool of external definitions if it's not already
  /// in the pool.
  void addExternalDef(VPValue *VPVal) {
    VPExternalDefs.insert(VPVal);
  }

  /// Add \p CBV to the vector of condition bit values.
  void addCBV(VPValue *CBV) {
    VPCBVs.push_back(CBV);
  }

  void addVPValue(Value *V) {
    assert(V && "Trying to add a null Value to VPlan");
    assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
    VPValue *VPV = new VPValue(V);
    Value2VPValue[V] = VPV;
    VPValuesToFree.push_back(VPV);
  }

  void addVPValue(Value *V, VPValue *VPV) {
    assert(V && "Trying to add a null Value to VPlan");
    assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
    Value2VPValue[V] = VPV;
  }

  VPValue *getVPValue(Value *V) {
    assert(V && "Trying to get the VPValue of a null Value");
    assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
    return Value2VPValue[V];
  }

  VPValue *getOrAddVPValue(Value *V) {
    assert(V && "Trying to get or add the VPValue of a null Value");
    if (!Value2VPValue.count(V))
      addVPValue(V);
    return getVPValue(V);
  }

  void removeVPValueFor(Value *V) { Value2VPValue.erase(V); }

  /// Return the VPLoopInfo analysis for this VPlan.
  VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
  const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }

  /// Dump the plan to stderr (for debugging).
  void dump() const;

  /// Returns a range mapping the values in the range \p Operands to their
  /// corresponding VPValues.
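  ///
  /// For example (a non-normative sketch; I is a hypothetical IR Instruction
  /// whose operands already have, or will get, corresponding VPValues):
  /// \code
  ///   SmallVector<VPValue *, 4> VPOperands;
  ///   for (VPValue *Op : Plan.mapToVPValues(I->operands()))
  ///     VPOperands.push_back(Op);
  /// \endcode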
  iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
  mapToVPValues(User::op_range Operands) {
    std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
      return getOrAddVPValue(Op);
    };
    return map_range(Operands, Fn);
  }

private:
  /// Add to the given dominator tree the header block and every new basic block
  /// that was created between it and the latch block, inclusive.
  static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
                                  BasicBlock *LoopPreHeaderBB,
                                  BasicBlock *LoopExitBB);
};

/// VPlanPrinter prints a given VPlan to a given output stream. The printing is
/// indented and follows the dot format.
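///
/// A plan is normally printed through the stream operator declared further
/// below (a non-normative sketch; Plan is a hypothetical VPlan):
/// \code
///   dbgs() << Plan;
/// \endcode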
class VPlanPrinter {
  friend inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan);
  friend inline raw_ostream &operator<<(raw_ostream &OS,
                                        const struct VPlanIngredient &I);

private:
  raw_ostream &OS;
  const VPlan &Plan;
  unsigned Depth = 0;
  unsigned TabWidth = 2;
  std::string Indent;
  unsigned BID = 0;
  SmallDenseMap<const VPBlockBase *, unsigned> BlockID;

  VPSlotTracker SlotTracker;

  VPlanPrinter(raw_ostream &O, const VPlan &P)
      : OS(O), Plan(P), SlotTracker(&P) {}

  /// Handle indentation.
  void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }

  /// Print a given \p Block of the Plan.
  void dumpBlock(const VPBlockBase *Block);

  /// Print the information related to the CFG edges going out of a given
  /// \p Block, followed by printing the successor blocks themselves.
  void dumpEdges(const VPBlockBase *Block);

  /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
  /// its successor blocks.
  void dumpBasicBlock(const VPBasicBlock *BasicBlock);

  /// Print a given \p Region of the Plan.
  void dumpRegion(const VPRegionBlock *Region);

  unsigned getOrCreateBID(const VPBlockBase *Block) {
    return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
  }

  const Twine getOrCreateName(const VPBlockBase *Block);

  const Twine getUID(const VPBlockBase *Block);

  /// Print the information related to a CFG edge between two VPBlockBases.
  void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
                const Twine &Label);

  void dump();

  static void printAsIngredient(raw_ostream &O, const Value *V);
};

struct VPlanIngredient {
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}
};

inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  VPlanPrinter::printAsIngredient(OS, I.V);
  return OS;
}

inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  VPlanPrinter Printer(OS, Plan);
  Printer.dump();
  return OS;
}

//===----------------------------------------------------------------------===//
// VPlan Utilities
//===----------------------------------------------------------------------===//

/// Class that provides utilities for VPBlockBases in VPlan.
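///
/// For example, a small straight-line CFG can be stitched together as follows
/// (a non-normative sketch; the blocks and their names are arbitrary):
/// \code
///   VPBasicBlock *A = new VPBasicBlock("A");
///   VPBasicBlock *B = new VPBasicBlock("B");
///   VPBasicBlock *C = new VPBasicBlock("C");
///   VPBlockUtils::insertBlockAfter(B, A); // A -> B
///   VPBlockUtils::connectBlocks(B, C);    // A -> B -> C
/// \endcode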
class VPBlockUtils {
public:
  VPBlockUtils() = delete;

  /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
  /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
  /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. If \p BlockPtr
  /// has more than one successor, its conditional bit is propagated to \p
  /// NewBlock. \p NewBlock must have neither successors nor predecessors.
  static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
    assert(NewBlock->getSuccessors().empty() &&
           "Can't insert new block with successors.");
    // TODO: move successors from BlockPtr to NewBlock when this functionality
    // is necessary. For now, setOneSuccessor will assert if BlockPtr already
    // has successors.
    BlockPtr->setOneSuccessor(NewBlock);
    NewBlock->setPredecessors({BlockPtr});
    NewBlock->setParent(BlockPtr->getParent());
  }

  /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
  /// BlockPtr. Add \p IfTrue and \p IfFalse as successors of \p BlockPtr and \p
  /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
  /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
  /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
  /// must have neither successors nor predecessors.
  static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
                                   VPValue *Condition, VPBlockBase *BlockPtr) {
    assert(IfTrue->getSuccessors().empty() &&
           "Can't insert IfTrue with successors.");
    assert(IfFalse->getSuccessors().empty() &&
           "Can't insert IfFalse with successors.");
    BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
    IfTrue->setPredecessors({BlockPtr});
    IfFalse->setPredecessors({BlockPtr});
    IfTrue->setParent(BlockPtr->getParent());
    IfFalse->setParent(BlockPtr->getParent());
  }

  /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
  /// the successors of \p From and \p From to the predecessors of \p To. Both
  /// VPBlockBases must have the same parent, which can be null. Both
  /// VPBlockBases can be already connected to other VPBlockBases.
  static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert((From->getParent() == To->getParent()) &&
           "Can't connect two block with different parents");
    assert(From->getNumSuccessors() < 2 &&
           "Blocks can't have more than two successors.");
    From->appendSuccessor(To);
    To->appendPredecessor(From);
  }

  /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
  /// from the successors of \p From and \p From from the predecessors of \p To.
  static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
    assert(To && "Successor to disconnect is null.");
    From->removeSuccessor(To);
    To->removePredecessor(From);
  }

  /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
  static bool isBackEdge(const VPBlockBase *FromBlock,
                         const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
    assert(FromBlock->getParent() == ToBlock->getParent() &&
           FromBlock->getParent() && "Must be in same region");
    const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
    const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
    if (!FromLoop || !ToLoop || FromLoop != ToLoop)
      return false;

    // A back-edge is a branch from the loop latch to its header.
    return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
  }

  /// Returns true if \p Block is a loop latch.
  static bool blockIsLoopLatch(const VPBlockBase *Block,
                               const VPLoopInfo *VPLInfo) {
    if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
      return ParentVPL->isLoopLatch(Block);

    return false;
  }

  /// Count and return the number of successors of \p PredBlock excluding any
  /// backedges.
  static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
                                      VPLoopInfo *VPLI) {
    unsigned Count = 0;
    for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
      if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
        Count++;
    }
    return Count;
  }
};

class VPInterleavedAccessInfo {
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction-based interleave groups to VPInstruction
  /// interleave groups.
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};

/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
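///
/// A typical use bundles seed values (e.g. a group of interleaved stores) and
/// asks for a combined VPInstruction (a non-normative sketch; Plan, IAI, VPBB
/// and the seed values StoreA/StoreB are hypothetical):
/// \code
///   VPInterleavedAccessInfo VPIAI(*Plan, IAI);
///   VPlanSlp Slp(VPIAI, *VPBB);
///   VPInstruction *Combined = Slp.buildGraph({StoreA, StoreB});
/// \endcode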
class VPlanSlp {
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  /// Input operand bundles for the current multi node. Each multi node operand
  /// bundle contains values not matching the multi node's opcode. They will be
  /// reordered in reorderMultiNodeOps, once we have completed building a multi
  /// node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instructions can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H