//===- VPlanUtils.cpp - VPlan-related utilities ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "VPlanUtils.h"
#include "VPlanCFG.h"
#include "VPlanPatternMatch.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"

using namespace llvm;

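/// Returns true if only the first lane of \p Def is used.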
bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstLaneUsed(Def); });
}

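/// Returns true if only the first part of \p Def is used.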
bool vputils::onlyFirstPartUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstPartUsed(Def); });
}

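/// Return a VPValue for the expansion of \p Expr, reusing an expansion cached
/// in \p Plan if one exists. Constants and SCEVUnknowns wrapping
/// non-instruction values become plan live-ins; all other expressions are
/// expanded by a VPExpandSCEVRecipe appended to the plan's entry block.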
VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
                                                ScalarEvolution &SE) {
  if (auto *Expanded = Plan.getSCEVExpansion(Expr))
    return Expanded;
  VPValue *Expanded = nullptr;
  if (auto *E = dyn_cast<SCEVConstant>(Expr))
    Expanded = Plan.getOrAddLiveIn(E->getValue());
  else {
    auto *U = dyn_cast<SCEVUnknown>(Expr);
    // Skip SCEV expansion if Expr is a SCEVUnknown wrapping a non-instruction
    // value; such values can be used directly. An instruction value may be
    // defined inside a loop, and using it directly would break LCSSA form;
    // the SCEV expansion takes care of preserving LCSSA form.
    if (U && !isa<Instruction>(U->getValue())) {
      Expanded = Plan.getOrAddLiveIn(U->getValue());
    } else {
      Expanded = new VPExpandSCEVRecipe(Expr, SE);
      Plan.getEntry()->appendRecipe(Expanded->getDefiningRecipe());
    }
  }
  Plan.addSCEVExpansion(Expr, Expanded);
  return Expanded;
}

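/// Return true if \p V is a header mask in \p Plan: an active-lane-mask phi,
/// an active.lane.mask of the canonical IV's scalar steps (or a wide
/// canonical IV) against the trip count, or a compare of a wide canonical IV
/// against the backedge-taken count.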
bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) {
  if (isa<VPActiveLaneMaskPHIRecipe>(V))
    return true;

  auto IsWideCanonicalIV = [](VPValue *A) {
    return isa<VPWidenCanonicalIVRecipe>(A) ||
           (isa<VPWidenIntOrFpInductionRecipe>(A) &&
            cast<VPWidenIntOrFpInductionRecipe>(A)->isCanonical());
  };

  VPValue *A, *B;
  using namespace VPlanPatternMatch;

  if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B))))
    return B == Plan.getTripCount() &&
           (match(A, m_ScalarIVSteps(m_Specific(Plan.getCanonicalIV()),
                                     m_SpecificInt(1),
                                     m_Specific(&Plan.getVF()))) ||
            IsWideCanonicalIV(A));

  return match(V, m_Binary<Instruction::ICmp>(m_VPValue(A), m_VPValue(B))) &&
         IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount();
}

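/// Return the SCEV expression for \p V, or SCEVCouldNotCompute if none can be
/// constructed. Currently only live-ins and VPExpandSCEVRecipe results are
/// supported.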
const SCEV *vputils::getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE) {
  if (V->isLiveIn())
    return SE.getSCEV(V->getLiveInIRValue());

  // TODO: Support constructing SCEVs for more recipes as needed.
  return TypeSwitch<const VPRecipeBase *, const SCEV *>(V->getDefiningRecipe())
      .Case<VPExpandSCEVRecipe>(
          [](const VPExpandSCEVRecipe *R) { return R->getSCEV(); })
      .Default([&SE](const VPRecipeBase *) { return SE.getCouldNotCompute(); });
}

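/// Returns true if \p V is uniform across all VF lanes and UF parts.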
bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) {
  using namespace VPlanPatternMatch;
  // Live-ins are uniform.
  if (V->isLiveIn())
    return true;

  VPRecipeBase *R = V->getDefiningRecipe();
  if (R && V->isDefinedOutsideLoopRegions()) {
    if (match(R, m_VPInstruction<VPInstruction::CanonicalIVIncrementForPart>(
                     m_VPValue())))
      return false;
    return all_of(R->operands(), isUniformAcrossVFsAndUFs);
  }

  auto *CanonicalIV = R->getParent()->getPlan()->getCanonicalIV();
  // Canonical IV chain is uniform.
  if (V == CanonicalIV || V == CanonicalIV->getBackedgeValue())
    return true;

  return TypeSwitch<const VPRecipeBase *, bool>(R)
      .Case<VPDerivedIVRecipe>([](const auto *) { return true; })
      .Case<VPReplicateRecipe>([](const auto *R) {
        // Loads and stores that are uniform across VF lanes are handled by
        // VPReplicateRecipe::isSingleScalar(). They are also uniform across
        // UF parts if all their operands are invariant.
        // TODO: Further relax the restrictions.
        return R->isSingleScalar() &&
               isa<LoadInst, StoreInst>(R->getUnderlyingValue()) &&
               all_of(R->operands(), isUniformAcrossVFsAndUFs);
      })
      .Case<VPInstruction>([](const auto *VPI) {
        return VPI->isScalarCast() &&
               isUniformAcrossVFsAndUFs(VPI->getOperand(0));
      })
      .Case<VPWidenCastRecipe>([](const auto *R) {
        // A cast is uniform if its operand is.
        return isUniformAcrossVFsAndUFs(R->getOperand(0));
      })
      .Default([](const VPRecipeBase *) {
        // A value is considered non-uniform unless proven otherwise.
        return false;
      });
}

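/// Returns the header block of the first loop reached by a shallow
/// depth-first walk from \p Plan's entry, or nullptr if no loop is found.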
VPBasicBlock *vputils::getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT) {
  auto DepthFirst = vp_depth_first_shallow(Plan.getEntry());
  auto I = find_if(DepthFirst, [&VPDT](VPBlockBase *VPB) {
    return VPBlockUtils::isHeader(VPB, VPDT);
  });
  return I == DepthFirst.end() ? nullptr : cast<VPBasicBlock>(*I);
}
138