//===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This is the LLVM vectorization plan. It represents a candidate for
/// vectorization, making it possible to plan and optimize how to vectorize a
/// given loop before generating LLVM-IR.
/// The vectorizer uses vectorization plans to estimate the costs of potential
/// candidates and, if profitable, to execute the desired plan by generating
/// vector LLVM-IR code.
///
//===----------------------------------------------------------------------===//

#include "VPlan.h"
#include "LoopVectorizationPlanner.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <string>
#include <vector>

using namespace llvm;
using namespace llvm::VPlanPatternMatch;

namespace llvm {
extern cl::opt<bool> EnableVPlanNativePath;
}

#define DEBUG_TYPE "vplan"

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
  const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  V.print(OS, SlotTracker);
  return OS;
}
#endif

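// Compute an IR value for this lane: a constant index for lanes counted from
// the start, or a runtime expression based on the runtime VF for lanes counted
// backwards from the end of a scalable vector.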
Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
                                const ElementCount &VF) const {
  switch (LaneKind) {
  case VPLane::Kind::ScalableLast:
    // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
    return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF),
                             Builder.getInt32(VF.getKnownMinValue() - Lane));
  case VPLane::Kind::First:
    return Builder.getInt32(Lane);
  }
  llvm_unreachable("Unknown lane kind");
}

VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
    : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
  if (Def)
    Def->addDefinedValue(this);
}

VPValue::~VPValue() {
  assert(Users.empty() && "trying to delete a VPValue with remaining users");
  if (Def)
    Def->removeDefinedValue(this);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
  if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
    R->print(OS, "", SlotTracker);
  else
    printAsOperand(OS, SlotTracker);
}

void VPValue::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), SlotTracker);
  dbgs() << "\n";
}

void VPDef::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), "", SlotTracker);
  dbgs() << "\n";
}
#endif

VPRecipeBase *VPValue::getDefiningRecipe() {
  return cast_or_null<VPRecipeBase>(Def);
}

const VPRecipeBase *VPValue::getDefiningRecipe() const {
  return cast_or_null<VPRecipeBase>(Def);
}

// Get the top-most entry block of \p Start. This is the entry block of the
// containing VPlan. This function is templated to support both const and
// non-const blocks.
template <typename T> static T *getPlanEntry(T *Start) {
  T *Next = Start;
  T *Current = Start;
  while ((Next = Next->getParent()))
    Current = Next;

  SmallSetVector<T *, 8> WorkList;
  WorkList.insert(Current);

  for (unsigned i = 0; i < WorkList.size(); i++) {
    T *Current = WorkList[i];
    if (Current->getNumPredecessors() == 0)
      return Current;
    auto &Predecessors = Current->getPredecessors();
    WorkList.insert(Predecessors.begin(), Predecessors.end());
  }

  llvm_unreachable("VPlan without any entry node without predecessors");
}

VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; }

const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; }

/// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

void VPBlockBase::setPlan(VPlan *ParentPlan) {
  assert(
      (ParentPlan->getEntry() == this || ParentPlan->getPreheader() == this) &&
      "Can only set plan on its entry or preheader block.");
  Plan = ParentPlan;
}

/// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getExitingBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExiting();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getExitingBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExiting();
  return cast<VPBasicBlock>(Block);
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
  if (!Successors.empty() || !Parent)
    return this;
  assert(Parent->getExiting() == this &&
         "Block w/o successors not the exiting block of its parent.");
  return Parent->getEnclosingBlockWithSuccessors();
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
  if (!Predecessors.empty() || !Parent)
    return this;
  assert(Parent->getEntry() == this &&
         "Block w/o predecessors not the entry of its parent.");
  return Parent->getEnclosingBlockWithPredecessors();
}

void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
  for (VPBlockBase *Block : to_vector(vp_depth_first_shallow(Entry)))
    delete Block;
}

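// Return an iterator pointing at the first non-phi recipe in the block, or
// end() if the block contains only phi-like recipes.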
VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
  iterator It = begin();
  while (It != end() && It->isPhi())
    It++;
  return It;
}

VPTransformState::VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
                                   DominatorTree *DT, IRBuilderBase &Builder,
                                   InnerLoopVectorizer *ILV, VPlan *Plan,
                                   LLVMContext &Ctx)
    : VF(VF), UF(UF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan),
      LVer(nullptr),
      TypeAnalysis(Plan->getCanonicalIV()->getScalarType(), Ctx) {}

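// Return the generated scalar value for \p Def at the given part and lane.
// Live-ins are returned directly; otherwise a cached scalar is reused if
// available, or the requested lane is extracted from the per-part vector
// value.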
Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
  if (Def->isLiveIn())
    return Def->getLiveInIRValue();

  if (hasScalarValue(Def, Instance)) {
    return Data
        .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
  }
  if (!Instance.Lane.isFirstLane() &&
      vputils::isUniformAfterVectorization(Def) &&
      hasScalarValue(Def, {Instance.Part, VPLane::getFirstLane()})) {
    return Data.PerPartScalars[Def][Instance.Part][0];
  }

  assert(hasVectorValue(Def, Instance.Part));
  auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
  if (!VecPart->getType()->isVectorTy()) {
    assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
    return VecPart;
  }
  // TODO: Cache created scalar values.
  Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
  auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
  // set(Def, Extract, Instance);
  return Extract;
}

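// Return the generated value for \p Def and \p Part. If \p NeedsScalar is
// true, return the single scalar kept per part. Otherwise return the per-part
// vector value, creating it on demand by broadcasting a uniform scalar or by
// packing the individual scalar lanes with insertelement instructions.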
Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
  if (NeedsScalar) {
    assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def, Part) ||
            !vputils::onlyFirstLaneUsed(Def) ||
            (hasScalarValue(Def, VPIteration(Part, 0)) &&
             Data.PerPartScalars[Def][Part].size() == 1)) &&
           "Trying to access a single scalar per part but has multiple scalars "
           "per part.");
    return get(Def, VPIteration(Part, 0));
  }

  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  auto GetBroadcastInstrs = [this, Def](Value *V) {
    bool SafeToHoist = Def->isDefinedOutsideVectorRegions();
    if (VF.isScalar())
      return V;
    // Place the code for broadcasting invariant variables in the new
    // preheader.
    IRBuilder<>::InsertPointGuard Guard(Builder);
    if (SafeToHoist) {
      BasicBlock *LoopVectorPreHeader = CFG.VPBB2IRBB[cast<VPBasicBlock>(
          Plan->getVectorLoopRegion()->getSinglePredecessor())];
      if (LoopVectorPreHeader)
        Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    }

    // Broadcast the scalar into all locations in the vector.
    Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

    return Shuf;
  };

  if (!hasScalarValue(Def, {Part, 0})) {
    assert(Def->isLiveIn() && "expected a live-in");
    if (Part != 0)
      return get(Def, 0);
    Value *IRV = Def->getLiveInIRValue();
    Value *B = GetBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  bool IsUniform = vputils::isUniformAfterVectorization(Def);

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes, VPScalarIVStepsRecipes and
    // VPExpandSCEVRecipes can also be uniform.
    assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDefiningRecipe()) ||
            isa<VPScalarIVStepsRecipe>(Def->getDefiningRecipe()) ||
            isa<VPExpandSCEVRecipe>(Def->getDefiningRecipe())) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = GetBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from undef.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Undef, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      packScalarIntoVectorValue(Def, {Part, Lane});
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

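// Return the IR basic block generated for the preheader of the loop region
// that encloses \p R.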
BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
  VPRegionBlock *LoopRegion = R->getParent()->getEnclosingLoopRegion();
  return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
}

void VPTransformState::addNewMetadata(Instruction *To,
                                      const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void VPTransformState::addMetadata(Value *To, Instruction *From) {
  // No source instruction to transfer metadata from?
  if (!From)
    return;

  if (Instruction *ToI = dyn_cast<Instruction>(To)) {
    propagateMetadata(ToI, From);
    addNewMetadata(ToI, From);
  }
}

void VPTransformState::setDebugLocFrom(DebugLoc DL) {
  const DILocation *DIL = DL;
  // When a FSDiscriminator is enabled, we don't need to add the multiply
  // factors to the discriminators.
  if (DIL &&
      Builder.GetInsertBlock()
          ->getParent()
          ->shouldEmitDebugInfoForProfiling() &&
      !EnableFSDiscriminator) {
    // FIXME: For scalable vectors, assume vscale=1.
    auto NewDIL =
        DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
    if (NewDIL)
      Builder.SetCurrentDebugLocation(*NewDIL);
    else
      LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                        << DIL->getFilename() << " Line: " << DIL->getLine());
  } else
    Builder.SetCurrentDebugLocation(DIL);
}

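// Insert the scalar value generated for \p Def at \p Instance into the
// corresponding lane of the per-part vector value.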
void VPTransformState::packScalarIntoVectorValue(VPValue *Def,
                                                 const VPIteration &Instance) {
  Value *ScalarInst = get(Def, Instance);
  Value *VectorValue = get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst, Instance.Lane.getAsRuntimeExpr(Builder, VF));
  set(Def, VectorValue, Instance.Part);
}

BasicBlock *
VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
  // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
  // Pred stands for Predecessor. Prev stands for Previous - last
  // visited/created.
  BasicBlock *PrevBB = CFG.PrevBB;
  BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
                                         PrevBB->getParent(), CFG.ExitBB);
  LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');

  // Hook up the new basic block to its predecessors.
  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
    auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
    BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];

    assert(PredBB && "Predecessor basic-block not found building successor.");
    auto *PredBBTerminator = PredBB->getTerminator();
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');

    auto *TermBr = dyn_cast<BranchInst>(PredBBTerminator);
    if (isa<UnreachableInst>(PredBBTerminator)) {
      assert(PredVPSuccessors.size() == 1 &&
             "Predecessor ending w/o branch must have single successor.");
      DebugLoc DL = PredBBTerminator->getDebugLoc();
      PredBBTerminator->eraseFromParent();
      auto *Br = BranchInst::Create(NewBB, PredBB);
      Br->setDebugLoc(DL);
    } else if (TermBr && !TermBr->isConditional()) {
      TermBr->setSuccessor(0, NewBB);
    } else {
      // Set each forward successor here when it is created, excluding
      // backedges. A backward successor is set when the branch is created.
      unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
      assert(!TermBr->getSuccessor(idx) &&
             "Trying to reset an existing successor block.");
      TermBr->setSuccessor(idx, NewBB);
    }
    CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, NewBB}});
  }
  return NewBB;
}

void VPIRBasicBlock::execute(VPTransformState *State) {
  assert(getHierarchicalSuccessors().size() <= 2 &&
         "VPIRBasicBlock can have at most two successors at the moment!");
  State->Builder.SetInsertPoint(getIRBasicBlock()->getTerminator());
  executeRecipes(State, getIRBasicBlock());
  if (getSingleSuccessor()) {
    assert(isa<UnreachableInst>(getIRBasicBlock()->getTerminator()));
    auto *Br = State->Builder.CreateBr(getIRBasicBlock());
    Br->setOperand(0, nullptr);
    getIRBasicBlock()->getTerminator()->eraseFromParent();
  }

  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
    BasicBlock *PredBB = State->CFG.VPBB2IRBB[PredVPBB];
    assert(PredBB && "Predecessor basic-block not found building successor.");
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');

    auto *PredBBTerminator = PredBB->getTerminator();
    auto *TermBr = cast<BranchInst>(PredBBTerminator);
    // Set each forward successor here when it is created, excluding
    // backedges. A backward successor is set when the branch is created.
    const auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
    unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
    assert(!TermBr->getSuccessor(idx) &&
           "Trying to reset an existing successor block.");
    TermBr->setSuccessor(idx, IRBB);
    State->CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, IRBB}});
  }
}

void VPBasicBlock::execute(VPTransformState *State) {
  bool Replica = State->Instance && !State->Instance->isFirstIteration();
  VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
  VPBlockBase *SingleHPred = nullptr;
  BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.

  auto IsLoopRegion = [](VPBlockBase *BB) {
    auto *R = dyn_cast<VPRegionBlock>(BB);
    return R && !R->isReplicator();
  };

  // 1. Create an IR basic block.
  if (PrevVPBB && /* A */
      !((SingleHPred = getSingleHierarchicalPredecessor()) &&
        SingleHPred->getExitingBasicBlock() == PrevVPBB &&
        PrevVPBB->getSingleHierarchicalSuccessor() &&
        (SingleHPred->getParent() == getEnclosingLoopRegion() &&
         !IsLoopRegion(SingleHPred))) &&         /* B */
      !(Replica && getPredecessors().empty())) { /* C */
    // The last IR basic block is reused, as an optimization, in three cases:
    // A. the first VPBB reuses the loop pre-header BB - when PrevVPBB is null;
    // B. when the current VPBB has a single (hierarchical) predecessor which
    //    is PrevVPBB and the latter has a single (hierarchical) successor,
    //    and both are in the same non-replicator region; and
    // C. when the current VPBB is an entry of a region replica - where PrevVPBB
    //    is the exiting VPBB of this region from a previous instance, or the
    //    predecessor of this region.

    NewBB = createEmptyBasicBlock(State->CFG);
    State->Builder.SetInsertPoint(NewBB);
    // Temporarily terminate with unreachable until CFG is rewired.
    UnreachableInst *Terminator = State->Builder.CreateUnreachable();
    // Register NewBB in its loop. In innermost loops it's the same for all
    // BBs.
    if (State->CurrentVectorLoop)
      State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, *State->LI);
    State->Builder.SetInsertPoint(Terminator);
    State->CFG.PrevBB = NewBB;
  }

  // 2. Fill the IR basic block with IR instructions.
  executeRecipes(State, NewBB);
}

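// Replace all uses of values defined by this block's recipes with \p NewValue
// and set all recipe operands to \p NewValue, dropping the block's def-use
// edges.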
void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
  for (VPRecipeBase &R : Recipes) {
    for (auto *Def : R.definedValues())
      Def->replaceAllUsesWith(NewValue);

    for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
      R.setOperand(I, NewValue);
  }
}

void VPBasicBlock::executeRecipes(VPTransformState *State, BasicBlock *BB) {
  LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
                    << " in BB:" << BB->getName() << '\n');

  State->CFG.VPBB2IRBB[this] = BB;
  State->CFG.PrevVPBB = this;

  for (VPRecipeBase &Recipe : Recipes)
    Recipe.execute(*State);

  LLVM_DEBUG(dbgs() << "LV: filled BB:" << *BB);
}

VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
  assert((SplitAt == end() || SplitAt->getParent() == this) &&
         "can only split at a position in the same block");

  SmallVector<VPBlockBase *, 2> Succs(successors());
  // First, disconnect the current block from its successors.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::disconnectBlocks(this, Succ);

  // Create new empty block after the block to split.
  auto *SplitBlock = new VPBasicBlock(getName() + ".split");
  VPBlockUtils::insertBlockAfter(SplitBlock, this);

  // Add successors for block to split to new block.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::connectBlocks(SplitBlock, Succ);

  // Finally, move the recipes starting at SplitAt to new block.
  for (VPRecipeBase &ToMove :
       make_early_inc_range(make_range(SplitAt, this->end())))
    ToMove.moveBefore(*SplitBlock, SplitBlock->end());

  return SplitBlock;
}

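// Return the closest enclosing loop region, stepping out of a parent replicate
// region if necessary.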
VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() {
  VPRegionBlock *P = getParent();
  if (P && P->isReplicator()) {
    P = P->getParent();
    assert(!cast<VPRegionBlock>(P)->isReplicator() &&
           "unexpected nested replicate regions");
  }
  return P;
}

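// Return true if \p VPBB is terminated by a conditional branch recipe,
// asserting that the terminator kind is consistent with the block's number of
// successors.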
static bool hasConditionalTerminator(const VPBasicBlock *VPBB) {
  if (VPBB->empty()) {
    assert(
        VPBB->getNumSuccessors() < 2 &&
        "block with multiple successors doesn't have a recipe as terminator");
    return false;
  }

  const VPRecipeBase *R = &VPBB->back();
  bool IsCondBranch = isa<VPBranchOnMaskRecipe>(R) ||
                      match(R, m_BranchOnCond(m_VPValue())) ||
                      match(R, m_BranchOnCount(m_VPValue(), m_VPValue()));
  (void)IsCondBranch;

  if (VPBB->getNumSuccessors() >= 2 ||
      (VPBB->isExiting() && !VPBB->getParent()->isReplicator())) {
    assert(IsCondBranch && "block with multiple successors not terminated by "
                           "conditional branch recipe");

    return true;
  }

  assert(
      !IsCondBranch &&
      "block with 0 or 1 successors terminated by conditional branch recipe");
  return false;
}

VPRecipeBase *VPBasicBlock::getTerminator() {
  if (hasConditionalTerminator(this))
    return &back();
  return nullptr;
}

const VPRecipeBase *VPBasicBlock::getTerminator() const {
  if (hasConditionalTerminator(this))
    return &back();
  return nullptr;
}

bool VPBasicBlock::isExiting() const {
  return getParent() && getParent()->getExitingBasicBlock() == this;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
  if (getSuccessors().empty()) {
    O << Indent << "No successors\n";
  } else {
    O << Indent << "Successor(s): ";
    ListSeparator LS;
    for (auto *Succ : getSuccessors())
      O << LS << Succ->getName();
    O << '\n';
  }
}

void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
                         VPSlotTracker &SlotTracker) const {
  O << Indent << getName() << ":\n";

  auto RecipeIndent = Indent + "  ";
  for (const VPRecipeBase &Recipe : *this) {
    Recipe.print(O, RecipeIndent, SlotTracker);
    O << '\n';
  }

  printSuccessors(O, Indent);
}
#endif

static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry);

// Clone the CFG for all nodes reachable from \p Entry. This includes cloning
// the blocks and their recipes. Operands of cloned recipes will NOT be updated.
// Remapping of operands must be done separately. Returns a pair with the new
// entry and exiting blocks of the cloned region. If \p Entry isn't part of a
// region, return nullptr for the exiting block.
static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry) {
  DenseMap<VPBlockBase *, VPBlockBase *> Old2NewVPBlocks;
  VPBlockBase *Exiting = nullptr;
  bool InRegion = Entry->getParent();
  // First, clone blocks reachable from Entry.
  for (VPBlockBase *BB : vp_depth_first_shallow(Entry)) {
    VPBlockBase *NewBB = BB->clone();
    Old2NewVPBlocks[BB] = NewBB;
    if (InRegion && BB->getNumSuccessors() == 0) {
      assert(!Exiting && "Multiple exiting blocks?");
      Exiting = BB;
    }
  }
  assert((!InRegion || Exiting) && "regions must have a single exiting block");

  // Second, update the predecessors & successors of the cloned blocks.
  for (VPBlockBase *BB : vp_depth_first_shallow(Entry)) {
    VPBlockBase *NewBB = Old2NewVPBlocks[BB];
    SmallVector<VPBlockBase *> NewPreds;
    for (VPBlockBase *Pred : BB->getPredecessors()) {
      NewPreds.push_back(Old2NewVPBlocks[Pred]);
    }
    NewBB->setPredecessors(NewPreds);
    SmallVector<VPBlockBase *> NewSuccs;
    for (VPBlockBase *Succ : BB->successors()) {
      NewSuccs.push_back(Old2NewVPBlocks[Succ]);
    }
    NewBB->setSuccessors(NewSuccs);
  }

#if !defined(NDEBUG)
  // Verify that the order of predecessors and successors matches in the cloned
  // version.
  for (const auto &[OldBB, NewBB] :
       zip(vp_depth_first_shallow(Entry),
           vp_depth_first_shallow(Old2NewVPBlocks[Entry]))) {
    for (const auto &[OldPred, NewPred] :
         zip(OldBB->getPredecessors(), NewBB->getPredecessors()))
      assert(NewPred == Old2NewVPBlocks[OldPred] && "Different predecessors");

    for (const auto &[OldSucc, NewSucc] :
         zip(OldBB->successors(), NewBB->successors()))
      assert(NewSucc == Old2NewVPBlocks[OldSucc] && "Different successors");
  }
#endif

  return std::make_pair(Old2NewVPBlocks[Entry],
                        Exiting ? Old2NewVPBlocks[Exiting] : nullptr);
}

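// Clone all blocks of the region and re-parent them to the new region. Recipe
// operands are not remapped here; callers remap them separately.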
VPRegionBlock *VPRegionBlock::clone() {
  const auto &[NewEntry, NewExiting] = cloneFrom(getEntry());
  auto *NewRegion =
      new VPRegionBlock(NewEntry, NewExiting, getName(), isReplicator());
  for (VPBlockBase *Block : vp_depth_first_shallow(NewEntry))
    Block->setParent(NewRegion);
  return NewRegion;
}

void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
  for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
    // Drop all references in VPBasicBlocks and replace all uses with
    // DummyValue.
    Block->dropAllReferences(NewValue);
}

void VPRegionBlock::execute(VPTransformState *State) {
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
      RPOT(Entry);

  if (!isReplicator()) {
    // Create and register the new vector loop.
    Loop *PrevLoop = State->CurrentVectorLoop;
    State->CurrentVectorLoop = State->LI->AllocateLoop();
    BasicBlock *VectorPH = State->CFG.VPBB2IRBB[getPreheaderVPBB()];
    Loop *ParentLoop = State->LI->getLoopFor(VectorPH);

    // Insert the new loop into the loop nest and register the new basic blocks
    // before calling any utilities such as SCEV that require valid LoopInfo.
    if (ParentLoop)
      ParentLoop->addChildLoop(State->CurrentVectorLoop);
    else
      State->LI->addTopLevelLoop(State->CurrentVectorLoop);

    // Visit the VPBlocks connected to "this", starting from it.
    for (VPBlockBase *Block : RPOT) {
      LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
      Block->execute(State);
    }

    State->CurrentVectorLoop = PrevLoop;
    return;
  }

  assert(!State->Instance && "Replicating a Region with non-null instance.");

  // Enter replicating mode.
  State->Instance = VPIteration(0, 0);

  for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
    State->Instance->Part = Part;
    assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
    for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
         ++Lane) {
      State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
      // Visit the VPBlocks connected to \p this, starting from it.
      for (VPBlockBase *Block : RPOT) {
        LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
        Block->execute(State);
      }
    }
  }

  // Exit replicating mode.
  State->Instance.reset();
}

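// Return the cost of the block for \p VF as the sum of the costs of its
// recipes.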
InstructionCost VPBasicBlock::cost(ElementCount VF, VPCostContext &Ctx) {
  InstructionCost Cost = 0;
  for (VPRecipeBase &R : Recipes)
    Cost += R.cost(VF, Ctx);
  return Cost;
}

InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) {
  if (!isReplicator()) {
    InstructionCost Cost = 0;
    for (VPBlockBase *Block : vp_depth_first_shallow(getEntry()))
      Cost += Block->cost(VF, Ctx);
    InstructionCost BackedgeCost =
        Ctx.TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
    LLVM_DEBUG(dbgs() << "Cost of " << BackedgeCost << " for VF " << VF
                      << ": vector loop backedge\n");
    Cost += BackedgeCost;
    return Cost;
  }

  // Compute the cost of a replicate region. Replicating isn't supported for
  // scalable vectors, return an invalid cost for them.
  // TODO: Discard scalable VPlans with replicate recipes earlier after
  // construction.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  // First compute the cost of the conditionally executed recipes, then account
  // for the branching cost, except if the mask is a header mask or uniform
  // condition.
  using namespace llvm::VPlanPatternMatch;
  VPBasicBlock *Then = cast<VPBasicBlock>(getEntry()->getSuccessors()[0]);
  InstructionCost ThenCost = Then->cost(VF, Ctx);

  // For the scalar case, we may not always execute the original predicated
  // block. Thus, scale the block's cost by the probability of executing it.
  if (VF.isScalar())
    return ThenCost / getReciprocalPredBlockProb();

  return ThenCost;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
  auto NewIndent = Indent + "  ";
  for (auto *BlockBase : vp_depth_first_shallow(Entry)) {
    O << '\n';
    BlockBase->print(O, NewIndent, SlotTracker);
  }
  O << Indent << "}\n";

  printSuccessors(O, Indent);
}
#endif

VPlan::~VPlan() {
  for (auto &KV : LiveOuts)
    delete KV.second;
  LiveOuts.clear();

  if (Entry) {
    VPValue DummyValue;
    for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
      Block->dropAllReferences(&DummyValue);

    VPBlockBase::deleteCFG(Entry);

    Preheader->dropAllReferences(&DummyValue);
    delete Preheader;
  }
  for (VPValue *VPV : VPLiveInsToFree)
    delete VPV;
  if (BackedgeTakenCount)
    delete BackedgeTakenCount;
}

VPlanPtr VPlan::createInitialVPlan(const SCEV *TripCount, ScalarEvolution &SE,
                                   bool RequiresScalarEpilogueCheck,
                                   bool TailFolded, Loop *TheLoop) {
  VPIRBasicBlock *Entry = new VPIRBasicBlock(TheLoop->getLoopPreheader());
  VPBasicBlock *VecPreheader = new VPBasicBlock("vector.ph");
  auto Plan = std::make_unique<VPlan>(Entry, VecPreheader);
  Plan->TripCount =
      vputils::getOrCreateVPValueForSCEVExpr(*Plan, TripCount, SE);
  // Create VPRegionBlock, with empty header and latch blocks, to be filled
  // during processing later.
  VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body");
  VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
  VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
  auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop",
                                      false /*isReplicator*/);

  VPBlockUtils::insertBlockAfter(TopRegion, VecPreheader);
  VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block");
  VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion);

  VPBasicBlock *ScalarPH = new VPBasicBlock("scalar.ph");
  if (!RequiresScalarEpilogueCheck) {
    VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
    return Plan;
  }

  // If needed, add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop. Three cases:
  // 1) If (N - N%VF) == N, then we *don't* need to run the remainder.
  //    Thus if tail is to be folded, we know we don't need to run the
  //    remainder and we can set the condition to true.
  // 2) If we require a scalar epilogue, there is no conditional branch as
  //    we unconditionally branch to the scalar preheader. Do nothing.
  // 3) Otherwise, construct a runtime check.
  BasicBlock *IRExitBlock = TheLoop->getUniqueExitBlock();
  auto *VPExitBlock = new VPIRBasicBlock(IRExitBlock);
  // The connection order corresponds to the operands of the conditional
  // branch.
  VPBlockUtils::insertBlockAfter(VPExitBlock, MiddleVPBB);
  VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);

  auto *ScalarLatchTerm = TheLoop->getLoopLatch()->getTerminator();
  // Here we use the same DebugLoc as the scalar loop latch terminator instead
  // of the corresponding compare because they may have ended up with
  // different line numbers and we want to avoid awkward line stepping while
  // debugging. Eg. if the compare has got a line number inside the loop.
  VPBuilder Builder(MiddleVPBB);
  VPValue *Cmp =
      TailFolded
          ? Plan->getOrAddLiveIn(ConstantInt::getTrue(
                IntegerType::getInt1Ty(TripCount->getType()->getContext())))
          : Builder.createICmp(CmpInst::ICMP_EQ, Plan->getTripCount(),
                               &Plan->getVectorTripCount(),
                               ScalarLatchTerm->getDebugLoc(), "cmp.n");
  Builder.createNaryOp(VPInstruction::BranchOnCond, {Cmp},
                       ScalarLatchTerm->getDebugLoc());
  return Plan;
}

void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
                             Value *CanonicalIVStartValue,
                             VPTransformState &State) {
  // Check if the backedge taken count is needed, and if so build it.
  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
    auto *TCMO = Builder.CreateSub(TripCountV,
                                   ConstantInt::get(TripCountV->getType(), 1),
                                   "trip.count.minus.1");
    BackedgeTakenCount->setUnderlyingValue(TCMO);
  }

  VectorTripCount.setUnderlyingValue(VectorTripCountV);

  IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
  // FIXME: Model VF * UF computation completely in VPlan.
  VFxUF.setUnderlyingValue(
      createStepForVF(Builder, TripCountV->getType(), State.VF, State.UF));

  // When vectorizing the epilogue loop, the canonical induction start value
  // needs to be changed from zero to the value after the main vector loop.
  // FIXME: Improve modeling for canonical IV start values in the epilogue
  // loop.
  if (CanonicalIVStartValue) {
    VPValue *VPV = getOrAddLiveIn(CanonicalIVStartValue);
    auto *IV = getCanonicalIV();
    assert(all_of(IV->users(),
                  [](const VPUser *U) {
                    return isa<VPScalarIVStepsRecipe>(U) ||
                           isa<VPScalarCastRecipe>(U) ||
                           isa<VPDerivedIVRecipe>(U) ||
                           cast<VPInstruction>(U)->getOpcode() ==
                               Instruction::Add;
                  }) &&
           "the canonical IV should only be used by its increment or "
           "ScalarIVSteps when resetting the start value");
    IV->setOperand(0, VPV);
  }
}

/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
/// VPBB are moved to the newly created VPIRBasicBlock. VPBB must have a single
/// predecessor, which is rewired to the new VPIRBasicBlock. All successors of
/// VPBB, if any, are rewired to the new VPIRBasicBlock.
static void replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB) {
  VPIRBasicBlock *IRMiddleVPBB = new VPIRBasicBlock(IRBB);
  for (auto &R : make_early_inc_range(*VPBB))
    R.moveBefore(*IRMiddleVPBB, IRMiddleVPBB->end());
  VPBlockBase *PredVPBB = VPBB->getSinglePredecessor();
  VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
  VPBlockUtils::connectBlocks(PredVPBB, IRMiddleVPBB);
  for (auto *Succ : to_vector(VPBB->getSuccessors())) {
    VPBlockUtils::connectBlocks(IRMiddleVPBB, Succ);
    VPBlockUtils::disconnectBlocks(VPBB, Succ);
  }
  delete VPBB;
}

/// Generate the code inside the preheader and body of the vectorized loop.
/// Assumes a single pre-header basic-block was created for this. Introduce
/// additional basic-blocks as needed, and fill them all.
void VPlan::execute(VPTransformState *State) {
  // Initialize CFG state.
  State->CFG.PrevVPBB = nullptr;
  State->CFG.ExitBB = State->CFG.PrevBB->getSingleSuccessor();
  BasicBlock *VectorPreHeader = State->CFG.PrevBB;
  State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());

  // Disconnect VectorPreHeader from ExitBB in both the CFG and DT.
  cast<BranchInst>(VectorPreHeader->getTerminator())->setSuccessor(0, nullptr);
  State->CFG.DTU.applyUpdates(
      {{DominatorTree::Delete, VectorPreHeader, State->CFG.ExitBB}});

  // Replace regular VPBB's for the middle and scalar preheader blocks with
  // VPIRBasicBlocks wrapping their IR blocks. The IR blocks are created during
  // skeleton creation, so we can only create the VPIRBasicBlocks now during
  // VPlan execution rather than earlier during VPlan construction.
  BasicBlock *MiddleBB = State->CFG.ExitBB;
  VPBasicBlock *MiddleVPBB =
      cast<VPBasicBlock>(getVectorLoopRegion()->getSingleSuccessor());
  // Find the VPBB for the scalar preheader, relying on the current structure
  // when creating the middle block and its successors: if there's a single
  // predecessor, it must be the scalar preheader. Otherwise, the second
  // successor is the scalar preheader.
  BasicBlock *ScalarPh = MiddleBB->getSingleSuccessor();
  auto &MiddleSuccs = MiddleVPBB->getSuccessors();
  assert((MiddleSuccs.size() == 1 || MiddleSuccs.size() == 2) &&
         "middle block has unexpected successors");
  VPBasicBlock *ScalarPhVPBB = cast<VPBasicBlock>(
      MiddleSuccs.size() == 1 ? MiddleSuccs[0] : MiddleSuccs[1]);
  assert(!isa<VPIRBasicBlock>(ScalarPhVPBB) &&
         "scalar preheader cannot be wrapped already");
  replaceVPBBWithIRVPBB(ScalarPhVPBB, ScalarPh);
  replaceVPBBWithIRVPBB(MiddleVPBB, MiddleBB);

  // Disconnect the middle block from its single successor (the scalar loop
  // header) in both the CFG and DT. The branch will be recreated during VPlan
  // execution.
  auto *BrInst = new UnreachableInst(MiddleBB->getContext());
  BrInst->insertBefore(MiddleBB->getTerminator());
  MiddleBB->getTerminator()->eraseFromParent();
  State->CFG.DTU.applyUpdates({{DominatorTree::Delete, MiddleBB, ScalarPh}});

  // Generate code in the loop pre-header and body.
  for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
    Block->execute(State);

  VPBasicBlock *LatchVPBB = getVectorLoopRegion()->getExitingBasicBlock();
  BasicBlock *VectorLatchBB = State->CFG.VPBB2IRBB[LatchVPBB];

  // Fix the latch value of canonical, reduction and first-order recurrences
  // phis in the vector loop.
  VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    // Skip phi-like recipes that generate their backedge values themselves.
    if (isa<VPWidenPHIRecipe>(&R))
      continue;

    if (isa<VPWidenPointerInductionRecipe>(&R) ||
        isa<VPWidenIntOrFpInductionRecipe>(&R)) {
      PHINode *Phi = nullptr;
      if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
        Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
      } else {
        auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
        assert(!WidenPhi->onlyScalarsGenerated(State->VF.isScalable()) &&
               "recipe generating only scalars should have been replaced");
        auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
        Phi = cast<PHINode>(GEP->getPointerOperand());
      }

      Phi->setIncomingBlock(1, VectorLatchBB);

      // Move the last step to the end of the latch block. This ensures
      // consistent placement of all induction updates.
      Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1));
      Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode());
      continue;
    }

    auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
    // For canonical IV, first-order recurrences and in-order reduction phis,
    // only a single part is generated, which provides the last part from the
    // previous iteration. For non-ordered reductions all UF parts are
    // generated.
    bool SinglePartNeeded =
        isa<VPCanonicalIVPHIRecipe>(PhiR) ||
        isa<VPFirstOrderRecurrencePHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
        (isa<VPReductionPHIRecipe>(PhiR) &&
         cast<VPReductionPHIRecipe>(PhiR)->isOrdered());
    bool NeedsScalar =
        isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
        (isa<VPReductionPHIRecipe>(PhiR) &&
         cast<VPReductionPHIRecipe>(PhiR)->isInLoop());
    unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;

    for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
      Value *Phi = State->get(PhiR, Part, NeedsScalar);
      Value *Val =
          State->get(PhiR->getBackedgeValue(),
                     SinglePartNeeded ? State->UF - 1 : Part, NeedsScalar);
      cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
    }
  }

  State->CFG.DTU.flush();
  assert(State->CFG.DTU.getDomTree().verify(
             DominatorTree::VerificationLevel::Fast) &&
         "DT not preserved correctly");
}

InstructionCost VPlan::cost(ElementCount VF, VPCostContext &Ctx) {
  // For now only return the cost of the vector loop region, ignoring any other
  // blocks, like the preheader or middle blocks.
  return getVectorLoopRegion()->cost(VF, Ctx);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPlan::printLiveIns(raw_ostream &O) const {
  VPSlotTracker SlotTracker(this);

  if (VFxUF.getNumUsers() > 0) {
    O << "\nLive-in ";
    VFxUF.printAsOperand(O, SlotTracker);
    O << " = VF * UF";
  }

  if (VectorTripCount.getNumUsers() > 0) {
    O << "\nLive-in ";
    VectorTripCount.printAsOperand(O, SlotTracker);
    O << " = vector-trip-count";
  }

  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    O << "\nLive-in ";
    BackedgeTakenCount->printAsOperand(O, SlotTracker);
    O << " = backedge-taken count";
  }

  O << "\n";
  if (TripCount->isLiveIn())
    O << "Live-in ";
  TripCount->printAsOperand(O, SlotTracker);
  O << " = original trip-count";
  O << "\n";
}

LLVM_DUMP_METHOD
void VPlan::print(raw_ostream &O) const {
  VPSlotTracker SlotTracker(this);

  O << "VPlan '" << getName() << "' {";

  printLiveIns(O);

  if (!getPreheader()->empty()) {
    O << "\n";
    getPreheader()->print(O, "", SlotTracker);
  }

  for (const VPBlockBase *Block : vp_depth_first_shallow(getEntry())) {
    O << '\n';
    Block->print(O, "", SlotTracker);
  }

  if (!LiveOuts.empty())
    O << "\n";
  for (const auto &KV : LiveOuts) {
    KV.second->print(O, SlotTracker);
  }

  O << "}\n";
}

std::string VPlan::getName() const {
  std::string Out;
  raw_string_ostream RSO(Out);
  RSO << Name << " for ";
  if (!VFs.empty()) {
    RSO << "VF={" << VFs[0];
    for (ElementCount VF : drop_begin(VFs))
      RSO << "," << VF;
    RSO << "},";
  }

  if (UFs.empty()) {
    RSO << "UF>=1";
  } else {
    RSO << "UF={" << UFs[0];
    for (unsigned UF : drop_begin(UFs))
      RSO << "," << UF;
    RSO << "}";
  }

  return Out;
}

LLVM_DUMP_METHOD
void VPlan::printDOT(raw_ostream &O) const {
  VPlanPrinter Printer(O, *this);
  Printer.dump();
}

LLVM_DUMP_METHOD
void VPlan::dump() const { print(dbgs()); }
#endif

void VPlan::addLiveOut(PHINode *PN, VPValue *V) {
  assert(LiveOuts.count(PN) == 0 && "an exit value for PN already exists");
  LiveOuts.insert({PN, new VPLiveOut(PN, V)});
}

static void remapOperands(VPBlockBase *Entry, VPBlockBase *NewEntry,
                          DenseMap<VPValue *, VPValue *> &Old2NewVPValues) {
  // Update the operands of all cloned recipes starting at NewEntry. This
  // traverses all reachable blocks. This is done in two steps, to handle cycles
  // in PHI recipes.
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
      OldDeepRPOT(Entry);
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
      NewDeepRPOT(NewEntry);
  // First, collect all mappings from old to new VPValues defined by cloned
  // recipes.
  for (const auto &[OldBB, NewBB] :
       zip(VPBlockUtils::blocksOnly<VPBasicBlock>(OldDeepRPOT),
           VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT))) {
    assert(OldBB->getRecipeList().size() == NewBB->getRecipeList().size() &&
           "blocks must have the same number of recipes");
    for (const auto &[OldR, NewR] : zip(*OldBB, *NewBB)) {
      assert(OldR.getNumOperands() == NewR.getNumOperands() &&
             "recipes must have the same number of operands");
      assert(OldR.getNumDefinedValues() == NewR.getNumDefinedValues() &&
             "recipes must define the same number of operands");
      for (const auto &[OldV, NewV] :
           zip(OldR.definedValues(), NewR.definedValues()))
        Old2NewVPValues[OldV] = NewV;
    }
  }

  // Update all operands to use cloned VPValues.
  for (VPBasicBlock *NewBB :
       VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT)) {
    for (VPRecipeBase &NewR : *NewBB)
      for (unsigned I = 0, E = NewR.getNumOperands(); I != E; ++I) {
        VPValue *NewOp = Old2NewVPValues.lookup(NewR.getOperand(I));
        NewR.setOperand(I, NewOp);
      }
  }
}

VPlan *VPlan::duplicate() {
  // Clone blocks.
  VPBasicBlock *NewPreheader = Preheader->clone();
  const auto &[NewEntry, __] = cloneFrom(Entry);

  // Create VPlan, clone live-ins and remap operands in the cloned blocks.
  auto *NewPlan = new VPlan(NewPreheader, cast<VPBasicBlock>(NewEntry));
  DenseMap<VPValue *, VPValue *> Old2NewVPValues;
  for (VPValue *OldLiveIn : VPLiveInsToFree) {
    Old2NewVPValues[OldLiveIn] =
        NewPlan->getOrAddLiveIn(OldLiveIn->getLiveInIRValue());
  }
  Old2NewVPValues[&VectorTripCount] = &NewPlan->VectorTripCount;
  Old2NewVPValues[&VFxUF] = &NewPlan->VFxUF;
  if (BackedgeTakenCount) {
    NewPlan->BackedgeTakenCount = new VPValue();
    Old2NewVPValues[BackedgeTakenCount] = NewPlan->BackedgeTakenCount;
  }
  assert(TripCount && "trip count must be set");
  if (TripCount->isLiveIn())
    Old2NewVPValues[TripCount] =
        NewPlan->getOrAddLiveIn(TripCount->getLiveInIRValue());
  // else NewTripCount will be created and inserted into Old2NewVPValues when
  // TripCount is cloned. In any case NewPlan->TripCount is updated below.

  remapOperands(Preheader, NewPreheader, Old2NewVPValues);
  remapOperands(Entry, NewEntry, Old2NewVPValues);

  // Clone live-outs.
  for (const auto &[_, LO] : LiveOuts)
    NewPlan->addLiveOut(LO->getPhi(), Old2NewVPValues[LO->getOperand(0)]);

  // Initialize remaining fields of cloned VPlan.
  NewPlan->VFs = VFs;
  NewPlan->UFs = UFs;
  // TODO: Adjust names.
  NewPlan->Name = Name;
  assert(Old2NewVPValues.contains(TripCount) &&
         "TripCount must have been added to Old2NewVPValues");
  NewPlan->TripCount = Old2NewVPValues[TripCount];
  return NewPlan;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

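// Return the DOT identifier used for \p Block; region blocks are emitted as
// clusters.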
getUID(const VPBlockBase * Block)1270 Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
1271 return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
1272 Twine(getOrCreateBID(Block));
1273 }
1274
getOrCreateName(const VPBlockBase * Block)1275 Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
1276 const std::string &Name = Block->getName();
1277 if (!Name.empty())
1278 return Name;
1279 return "VPB" + Twine(getOrCreateBID(Block));
1280 }
1281
dump()1282 void VPlanPrinter::dump() {
1283 Depth = 1;
1284 bumpIndent(0);
1285 OS << "digraph VPlan {\n";
1286 OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
1287 if (!Plan.getName().empty())
1288 OS << "\\n" << DOT::EscapeString(Plan.getName());
1289
1290 {
1291 // Print live-ins.
1292 std::string Str;
1293 raw_string_ostream SS(Str);
1294 Plan.printLiveIns(SS);
1295 SmallVector<StringRef, 0> Lines;
1296 StringRef(Str).rtrim('\n').split(Lines, "\n");
1297 for (auto Line : Lines)
1298 OS << DOT::EscapeString(Line.str()) << "\\n";
1299 }
1300
1301 OS << "\"]\n";
1302 OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
1303 OS << "edge [fontname=Courier, fontsize=30]\n";
1304 OS << "compound=true\n";
1305
1306 dumpBlock(Plan.getPreheader());
1307
1308 for (const VPBlockBase *Block : vp_depth_first_shallow(Plan.getEntry()))
1309 dumpBlock(Block);
1310
1311 OS << "}\n";
1312 }
1313
dumpBlock(const VPBlockBase * Block)1314 void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
1315 if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
1316 dumpBasicBlock(BasicBlock);
1317 else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
1318 dumpRegion(Region);
1319 else
1320 llvm_unreachable("Unsupported kind of VPBlock.");
1321 }
1322
drawEdge(const VPBlockBase * From,const VPBlockBase * To,bool Hidden,const Twine & Label)1323 void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
1324 bool Hidden, const Twine &Label) {
1325 // Due to "dot" we print an edge between two regions as an edge between the
1326 // exiting basic block and the entry basic of the respective regions.
1327 const VPBlockBase *Tail = From->getExitingBasicBlock();
1328 const VPBlockBase *Head = To->getEntryBasicBlock();
1329 OS << Indent << getUID(Tail) << " -> " << getUID(Head);
1330 OS << " [ label=\"" << Label << '\"';
1331 if (Tail != From)
1332 OS << " ltail=" << getUID(From);
1333 if (Head != To)
1334 OS << " lhead=" << getUID(To);
1335 if (Hidden)
1336 OS << "; splines=none";
1337 OS << "]\n";
1338 }
1339
dumpEdges(const VPBlockBase * Block)1340 void VPlanPrinter::dumpEdges(const VPBlockBase *Block) {
1341 auto &Successors = Block->getSuccessors();
1342 if (Successors.size() == 1)
1343 drawEdge(Block, Successors.front(), false, "");
1344 else if (Successors.size() == 2) {
1345 drawEdge(Block, Successors.front(), false, "T");
1346 drawEdge(Block, Successors.back(), false, "F");
1347 } else {
1348 unsigned SuccessorNumber = 0;
1349 for (auto *Successor : Successors)
1350 drawEdge(Block, Successor, false, Twine(SuccessorNumber++));
1351 }
1352 }
1353
dumpBasicBlock(const VPBasicBlock * BasicBlock)1354 void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) {
1355 // Implement dot-formatted dump by performing plain-text dump into the
1356 // temporary storage followed by some post-processing.
1357 OS << Indent << getUID(BasicBlock) << " [label =\n";
1358 bumpIndent(1);
1359 std::string Str;
1360 raw_string_ostream SS(Str);
1361 // Use no indentation as we need to wrap the lines into quotes ourselves.
1362 BasicBlock->print(SS, "", SlotTracker);
1363
1364 // We need to process each line of the output separately, so split
1365 // single-string plain-text dump.
1366 SmallVector<StringRef, 0> Lines;
1367 StringRef(Str).rtrim('\n').split(Lines, "\n");
1368
1369 auto EmitLine = [&](StringRef Line, StringRef Suffix) {
1370 OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix;
1371 };
1372
1373 // Don't need the "+" after the last line.
1374 for (auto Line : make_range(Lines.begin(), Lines.end() - 1))
1375 EmitLine(Line, " +\n");
1376 EmitLine(Lines.back(), "\n");
1377
1378 bumpIndent(-1);
1379 OS << Indent << "]\n";
1380
1381 dumpEdges(BasicBlock);
1382 }
1383
dumpRegion(const VPRegionBlock * Region)1384 void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) {
1385 OS << Indent << "subgraph " << getUID(Region) << " {\n";
1386 bumpIndent(1);
1387 OS << Indent << "fontname=Courier\n"
1388 << Indent << "label=\""
1389 << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ")
1390 << DOT::EscapeString(Region->getName()) << "\"\n";
1391 // Dump the blocks of the region.
1392 assert(Region->getEntry() && "Region contains no inner blocks.");
1393 for (const VPBlockBase *Block : vp_depth_first_shallow(Region->getEntry()))
1394 dumpBlock(Block);
1395 bumpIndent(-1);
1396 OS << Indent << "}\n";
1397 dumpEdges(Region);
1398 }
1399
void VPlanIngredient::print(raw_ostream &O) const {
  if (auto *Inst = dyn_cast<Instruction>(V)) {
    if (!Inst->getType()->isVoidTy()) {
      Inst->printAsOperand(O, false);
      O << " = ";
    }
    O << Inst->getOpcodeName() << " ";
    unsigned E = Inst->getNumOperands();
    if (E > 0) {
      Inst->getOperand(0)->printAsOperand(O, false);
      for (unsigned I = 1; I < E; ++I)
        Inst->getOperand(I)->printAsOperand(O << ", ", false);
    }
  } else // !Inst
    V->printAsOperand(O, false);
}

#endif

template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT);

void VPValue::replaceAllUsesWith(VPValue *New) {
  replaceUsesWithIf(New, [](VPUser &, unsigned) { return true; });
}

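// Replace uses of this VPValue with \p New for those users and operand
// indices where \p ShouldReplace returns true. A minimal usage sketch, with
// Old and New as placeholder VPValue pointers, replacing only first-operand
// uses:
//   Old->replaceUsesWithIf(
//       New, [](VPUser &U, unsigned Idx) { return Idx == 0; });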
void VPValue::replaceUsesWithIf(
    VPValue *New,
    llvm::function_ref<bool(VPUser &U, unsigned Idx)> ShouldReplace) {
  // Note that this early exit is required for correctness; the implementation
  // below relies on the number of users for this VPValue to decrease, which
  // isn't the case if this == New.
  if (this == New)
    return;

  for (unsigned J = 0; J < getNumUsers();) {
    VPUser *User = Users[J];
    bool RemovedUser = false;
    for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I) {
      if (User->getOperand(I) != this || !ShouldReplace(*User, I))
        continue;

      RemovedUser = true;
      User->setOperand(I, New);
    }
    // If a user got removed after updating the current user, the next user to
    // update will be moved to the current position, so we only need to
    // increment the index if the number of users did not change.
    if (!RemovedUser)
      J++;
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const {
  OS << Tracker.getOrCreateName(this);
}

void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const {
  interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
}
#endif

void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
                                          Old2NewTy &Old2New,
                                          InterleavedAccessInfo &IAI) {
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
      RPOT(Region->getEntry());
  for (VPBlockBase *Base : RPOT) {
    visitBlock(Base, Old2New, IAI);
  }
}

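// For each VPInstruction in \p Block that wraps an IR instruction belonging to
// an interleave group in \p IAI, mirror that group as an
// InterleaveGroup<VPInstruction> (created on first use and cached in
// \p Old2New) and record the VPInstruction's membership in InterleaveGroupMap.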
void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                                         InterleavedAccessInfo &IAI) {
  if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
    for (VPRecipeBase &VPI : *VPBB) {
      if (isa<VPWidenPHIRecipe>(&VPI))
        continue;
      assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
      auto *VPInst = cast<VPInstruction>(&VPI);

      auto *Inst = dyn_cast_or_null<Instruction>(VPInst->getUnderlyingValue());
      if (!Inst)
        continue;
      auto *IG = IAI.getInterleaveGroup(Inst);
      if (!IG)
        continue;

      auto NewIGIter = Old2New.find(IG);
      if (NewIGIter == Old2New.end())
        Old2New[IG] = new InterleaveGroup<VPInstruction>(
            IG->getFactor(), IG->isReverse(), IG->getAlign());

      if (Inst == IG->getInsertPos())
        Old2New[IG]->setInsertPos(VPInst);

      InterleaveGroupMap[VPInst] = Old2New[IG];
      InterleaveGroupMap[VPInst]->insertMember(
          VPInst, IG->getIndex(Inst),
          Align(IG->isReverse() ? (-1) * int(IG->getFactor())
                                : IG->getFactor()));
    }
  } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    visitRegion(Region, Old2New, IAI);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
                                                 InterleavedAccessInfo &IAI) {
  Old2NewTy Old2New;
  visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI);
}

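// Assign a printable name to \p V. Values without an underlying IR value get a
// numbered slot such as "vp<%3>"; values backed by an IR value %x are printed
// as "ir<%x>", with later VPValues that share the same base name versioned as
// "ir<%x>.1", "ir<%x>.2", and so on.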
void VPSlotTracker::assignName(const VPValue *V) {
  assert(!VPValue2Name.contains(V) && "VPValue already has a name!");
  auto *UV = V->getUnderlyingValue();
  if (!UV) {
    VPValue2Name[V] = (Twine("vp<%") + Twine(NextSlot) + ">").str();
    NextSlot++;
    return;
  }

  // Use the name of the underlying Value, wrapped in "ir<>", and versioned by
  // appending ".Number" to the name if there are multiple uses.
  std::string Name;
  raw_string_ostream S(Name);
  UV->printAsOperand(S, false);
  assert(!Name.empty() && "Name cannot be empty.");
  std::string BaseName = (Twine("ir<") + Name + Twine(">")).str();

  // First assign the base name for V.
  const auto &[A, _] = VPValue2Name.insert({V, BaseName});
  // Integer or FP constants with different types will result in the same
  // string due to stripping types.
  if (V->isLiveIn() && isa<ConstantInt, ConstantFP>(UV))
    return;

  // If it is already used by C > 0 other VPValues, increase the version
  // counter C and use it for V.
  const auto &[C, UseInserted] = BaseName2Version.insert({BaseName, 0});
  if (!UseInserted) {
    C->second++;
    A->second = (BaseName + Twine(".") + Twine(C->second)).str();
  }
}

void VPSlotTracker::assignNames(const VPlan &Plan) {
  if (Plan.VFxUF.getNumUsers() > 0)
    assignName(&Plan.VFxUF);
  assignName(&Plan.VectorTripCount);
  if (Plan.BackedgeTakenCount)
    assignName(Plan.BackedgeTakenCount);
  for (VPValue *LI : Plan.VPLiveInsToFree)
    assignName(LI);
  assignNames(Plan.getPreheader());

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<const VPBlockBase *>>
      RPOT(VPBlockDeepTraversalWrapper<const VPBlockBase *>(Plan.getEntry()));
  for (const VPBasicBlock *VPBB :
       VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT))
    assignNames(VPBB);
}

void VPSlotTracker::assignNames(const VPBasicBlock *VPBB) {
  for (const VPRecipeBase &Recipe : *VPBB)
    for (VPValue *Def : Recipe.definedValues())
      assignName(Def);
}

std::string VPSlotTracker::getOrCreateName(const VPValue *V) const {
  std::string Name = VPValue2Name.lookup(V);
  if (!Name.empty())
    return Name;

  // If no name was assigned, either no VPlan was provided when creating the
  // slot tracker, or the value is not reachable from the provided VPlan. This
  // can happen, e.g., when trying to print a recipe that has not been inserted
  // into a VPlan in a debugger.
  // TODO: Update the VPSlotTracker constructor to assign names to recipes &
  // VPValues not associated with a VPlan, instead of constructing names ad-hoc
  // here.
  const VPRecipeBase *DefR = V->getDefiningRecipe();
  (void)DefR;
  assert((!DefR || !DefR->getParent() || !DefR->getParent()->getPlan()) &&
         "VPValue defined by a recipe in a VPlan?");

  // Use the underlying value's name, if there is one.
  if (auto *UV = V->getUnderlyingValue()) {
    std::string Name;
    raw_string_ostream S(Name);
    UV->printAsOperand(S, false);
    return (Twine("ir<") + Name + ">").str();
  }

  return "<badref>";
}

bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstLaneUsed(Def); });
}

bool vputils::onlyFirstPartUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstPartUsed(Def); });
}

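// Return a VPValue for \p Expr, reusing a previously recorded expansion if one
// exists. SCEV constants and SCEVUnknowns are added as live-ins directly; any
// other expression is materialized via a VPExpandSCEVRecipe appended to the
// plan's preheader.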
VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
                                                ScalarEvolution &SE) {
  if (auto *Expanded = Plan.getSCEVExpansion(Expr))
    return Expanded;
  VPValue *Expanded = nullptr;
  if (auto *E = dyn_cast<SCEVConstant>(Expr))
    Expanded = Plan.getOrAddLiveIn(E->getValue());
  else if (auto *E = dyn_cast<SCEVUnknown>(Expr))
    Expanded = Plan.getOrAddLiveIn(E->getValue());
  else {
    Expanded = new VPExpandSCEVRecipe(Expr, SE);
    Plan.getPreheader()->appendRecipe(Expanded->getDefiningRecipe());
  }
  Plan.addSCEVExpansion(Expr, Expanded);
  return Expanded;
}

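// Return true if \p V computes the loop's header mask, i.e. it is an
// active-lane-mask PHI, an active-lane-mask of the canonical induction (wide
// or unit-stepped) against the trip count, or an icmp of a wide canonical
// induction against the backedge-taken count.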
bool vputils::isHeaderMask(VPValue *V, VPlan &Plan) {
  if (isa<VPActiveLaneMaskPHIRecipe>(V))
    return true;

  auto IsWideCanonicalIV = [](VPValue *A) {
    return isa<VPWidenCanonicalIVRecipe>(A) ||
           (isa<VPWidenIntOrFpInductionRecipe>(A) &&
            cast<VPWidenIntOrFpInductionRecipe>(A)->isCanonical());
  };

  VPValue *A, *B;
  if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B))))
    return B == Plan.getTripCount() &&
           (match(A, m_ScalarIVSteps(m_CanonicalIV(), m_SpecificInt(1))) ||
            IsWideCanonicalIV(A));

  return match(V, m_Binary<Instruction::ICmp>(m_VPValue(A), m_VPValue(B))) &&
         IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount();
}