//===- InterleavedAccessPass.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Interleaved Access pass, which identifies
// interleaved memory accesses and transforms them into target specific
// intrinsics.
//
// An interleaved load reads data from memory into several vectors,
// DE-interleaving the data by a factor. An interleaved store RE-interleaves
// the data from several vectors by a factor and writes it to memory.
//
// As interleaved accesses are difficult to identify in CodeGen (mainly
// because the VECTOR_SHUFFLE DAG node is quite different from the shufflevector
// IR), we identify and transform them to intrinsics in this pass so the
// intrinsics can be easily matched into target specific instructions later in
// CodeGen.
//
// E.g. An interleaved load (Factor = 2):
//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <0, 2, 4, 6>
//        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <1, 3, 5, 7>
//
// It could be transformed into an ld2 intrinsic in the AArch64 backend or a
// vld2 intrinsic in the ARM backend.
//
// On X86, this can be further optimized into a set of target
// specific loads followed by an optimized sequence of shuffles.
//
// E.g. An interleaved store (Factor = 3):
//        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
//                                    <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
//        store <12 x i32> %i.vec, <12 x i32>* %ptr
//
// It could be transformed into an st3 intrinsic in the AArch64 backend or a
// vst3 intrinsic in the ARM backend.
//
// Similarly, a set of interleaved stores can be transformed into an optimized
// sequence of shuffles followed by a set of target specific stores on X86.
//
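// Besides the shufflevector-based patterns above, the pass also lowers loads
// and stores expressed with the factor-2 vector (de)interleave intrinsics
// (see runOnFunction below). A minimal IR sketch of those forms, with
// illustrative value names:
//
//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//        %deint = call { <4 x i32>, <4 x i32> }
//                      @llvm.vector.deinterleave2.v8i32(<8 x i32> %wide.vec)
//
// and, for the store side:
//
//        %int = call <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32> %v0,
//                                                             <4 x i32> %v1)
//        store <8 x i32> %int, <8 x i32>* %ptr
//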
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/InterleavedAccess.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "interleaved-access"

static cl::opt<bool> LowerInterleavedAccesses(
    "lower-interleaved-accesses",
    cl::desc("Enable lowering interleaved accesses to intrinsics"),
    cl::init(true), cl::Hidden);

namespace {

class InterleavedAccessImpl {
  friend class InterleavedAccess;

public:
  InterleavedAccessImpl() = default;
  InterleavedAccessImpl(DominatorTree *DT, const TargetLowering *TLI)
      : DT(DT), TLI(TLI), MaxFactor(TLI->getMaxSupportedInterleaveFactor()) {}
  bool runOnFunction(Function &F);

private:
  DominatorTree *DT = nullptr;
  const TargetLowering *TLI = nullptr;

  /// The maximum supported interleave factor.
  unsigned MaxFactor = 0u;

  /// Transform an interleaved load into target specific intrinsics.
  bool lowerInterleavedLoad(LoadInst *LI,
                            SmallVector<Instruction *, 32> &DeadInsts);

  /// Transform an interleaved store into target specific intrinsics.
  bool lowerInterleavedStore(StoreInst *SI,
                             SmallVector<Instruction *, 32> &DeadInsts);

  /// Transform a load and a deinterleave intrinsic into target specific
  /// instructions.
  bool lowerDeinterleaveIntrinsic(IntrinsicInst *II,
                                  SmallVector<Instruction *, 32> &DeadInsts);

  /// Transform an interleave intrinsic and a store into target specific
  /// instructions.
  bool lowerInterleaveIntrinsic(IntrinsicInst *II,
                                SmallVector<Instruction *, 32> &DeadInsts);

  /// Returns true if the uses of an interleaved load by the
  /// extractelement instructions in \p Extracts can be replaced by uses of the
  /// shufflevector instructions in \p Shuffles instead. If so, the necessary
  /// replacements are also performed.
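  ///
  /// For example (an illustrative sketch; value names are not from this pass),
  /// with %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <0, 2, 4, 6>:
  ///   %e = extractelement <8 x i32> %wide.vec, i32 2
  /// can be rewritten to read the same value from the shuffle instead:
  ///   %e = extractelement <4 x i32> %v0, i32 1
  /// leaving the shufflevectors as the only users of the wide load.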
  bool tryReplaceExtracts(ArrayRef<ExtractElementInst *> Extracts,
                          ArrayRef<ShuffleVectorInst *> Shuffles);

  /// Given a number of shuffles of the form shuffle(binop(x,y)), convert them
  /// to binop(shuffle(x), shuffle(y)) to allow the formation of an
  /// interleaving load. Any newly created shuffles that operate on \p LI will
  /// be added to \p Shuffles. Returns true if any changes to the IR have been
  /// made.
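  ///
  /// For example (an illustrative sketch; value names are not from this pass),
  /// given %binop = add <8 x i32> %x, %y:
  ///   %s = shuffle <8 x i32> %binop, <8 x i32> poison, <0, 2, 4, 6>
  /// is rewritten as
  ///   %x.s = shuffle <8 x i32> %x, <8 x i32> poison, <0, 2, 4, 6>
  ///   %y.s = shuffle <8 x i32> %y, <8 x i32> poison, <0, 2, 4, 6>
  ///   %s   = add <4 x i32> %x.s, %y.s
  /// so that the shuffle of the load operand can be matched as part of an
  /// interleaved load.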
  bool replaceBinOpShuffles(ArrayRef<ShuffleVectorInst *> BinOpShuffles,
                            SmallVectorImpl<ShuffleVectorInst *> &Shuffles,
                            LoadInst *LI);
};

class InterleavedAccess : public FunctionPass {
  InterleavedAccessImpl Impl;

public:
  static char ID;

  InterleavedAccess() : FunctionPass(ID) {
    initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "Interleaved Access Pass"; }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace.

PreservedAnalyses InterleavedAccessPass::run(Function &F,
                                             FunctionAnalysisManager &FAM) {
  auto *DT = &FAM.getResult<DominatorTreeAnalysis>(F);
  auto *TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  InterleavedAccessImpl Impl(DT, TLI);
  bool Changed = Impl.runOnFunction(F);

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

char InterleavedAccess::ID = 0;

bool InterleavedAccess::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC || !LowerInterleavedAccesses)
    return false;

  LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");

  Impl.DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &TM = TPC->getTM<TargetMachine>();
  Impl.TLI = TM.getSubtargetImpl(F)->getTargetLowering();
  Impl.MaxFactor = Impl.TLI->getMaxSupportedInterleaveFactor();

  return Impl.runOnFunction(F);
}

INITIALIZE_PASS_BEGIN(InterleavedAccess, DEBUG_TYPE,
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(InterleavedAccess, DEBUG_TYPE,
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)

FunctionPass *llvm::createInterleavedAccessPass() {
  return new InterleavedAccess();
}

/// Check if the mask is a DE-interleave mask for an interleaved load.
///
/// E.g. DE-interleave masks (Factor = 2) could be:
///     <0, 2, 4, 6>    (mask of index 0 to extract even elements)
///     <1, 3, 5, 7>    (mask of index 1 to extract odd elements)
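///
/// More generally, the mask of index I for Factor F selects elements
/// <I, I+F, I+2F, ...>; e.g. for Factor = 3, <1, 4, 7, 10> is the mask of
/// index 1 (an illustrative example, mirroring the Factor = 2 masks above).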
static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
                               unsigned &Index, unsigned MaxFactor,
                               unsigned NumLoadElements) {
  if (Mask.size() < 2)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++) {
    // Make sure we don't produce a load wider than the input load.
    if (Mask.size() * Factor > NumLoadElements)
      return false;
    if (ShuffleVectorInst::isDeInterleaveMaskOfFactor(Mask, Factor, Index))
      return true;
  }

  return false;
}

/// Check if the mask can be used in an interleaved store.
///
/// It checks for a more general pattern than the RE-interleave mask.
/// I.e. <x, y, ... z, x+1, y+1, ...z+1, x+2, y+2, ...z+2, ...>
/// E.g. For a Factor of 2 (LaneLen=4): <4, 32, 5, 33, 6, 34, 7, 35>
/// E.g. For a Factor of 3 (LaneLen=4): <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
/// E.g. For a Factor of 4 (LaneLen=2): <8, 2, 12, 4, 9, 3, 13, 5>
///
/// The particular case of an RE-interleave mask is:
/// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
/// E.g. For a Factor of 2 (LaneLen=4): <0, 4, 1, 5, 2, 6, 3, 7>
static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
                               unsigned MaxFactor) {
  unsigned NumElts = SVI->getShuffleMask().size();
  if (NumElts < 4)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++) {
    if (SVI->isInterleave(Factor))
      return true;
  }

  return false;
}

bool InterleavedAccessImpl::lowerInterleavedLoad(
    LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!LI->isSimple() || isa<ScalableVectorType>(LI->getType()))
    return false;

  // Check if all users of this load are shufflevectors. If we encounter any
  // users that are extractelement instructions or binary operators, we save
  // them to later check if they can be modified to extract from one of the
  // shufflevectors instead of the load.

  SmallVector<ShuffleVectorInst *, 4> Shuffles;
  SmallVector<ExtractElementInst *, 4> Extracts;
  // BinOpShuffles need to be handled a single time in case both operands of the
  // binop are the same load.
  SmallSetVector<ShuffleVectorInst *, 4> BinOpShuffles;

  for (auto *User : LI->users()) {
    auto *Extract = dyn_cast<ExtractElementInst>(User);
    if (Extract && isa<ConstantInt>(Extract->getIndexOperand())) {
      Extracts.push_back(Extract);
      continue;
    }
    if (auto *BI = dyn_cast<BinaryOperator>(User)) {
      if (!BI->user_empty() && all_of(BI->users(), [](auto *U) {
            auto *SVI = dyn_cast<ShuffleVectorInst>(U);
            return SVI && isa<UndefValue>(SVI->getOperand(1));
          })) {
        for (auto *SVI : BI->users())
          BinOpShuffles.insert(cast<ShuffleVectorInst>(SVI));
        continue;
      }
    }
    auto *SVI = dyn_cast<ShuffleVectorInst>(User);
    if (!SVI || !isa<UndefValue>(SVI->getOperand(1)))
      return false;

    Shuffles.push_back(SVI);
  }

  if (Shuffles.empty() && BinOpShuffles.empty())
    return false;

  unsigned Factor, Index;

  unsigned NumLoadElements =
      cast<FixedVectorType>(LI->getType())->getNumElements();
  auto *FirstSVI = Shuffles.size() > 0 ? Shuffles[0] : BinOpShuffles[0];
  // Check if the first shufflevector is a DE-interleave shuffle.
  if (!isDeInterleaveMask(FirstSVI->getShuffleMask(), Factor, Index, MaxFactor,
                          NumLoadElements))
    return false;

  // Holds the corresponding index for each DE-interleave shuffle.
  SmallVector<unsigned, 4> Indices;

  Type *VecTy = FirstSVI->getType();

  // Check that the other shufflevectors are also DE-interleave shuffles of the
  // same type and factor as the first shufflevector.
  for (auto *Shuffle : Shuffles) {
    if (Shuffle->getType() != VecTy)
      return false;
    if (!ShuffleVectorInst::isDeInterleaveMaskOfFactor(
            Shuffle->getShuffleMask(), Factor, Index))
      return false;

    assert(Shuffle->getShuffleMask().size() <= NumLoadElements);
    Indices.push_back(Index);
  }
  for (auto *Shuffle : BinOpShuffles) {
    if (Shuffle->getType() != VecTy)
      return false;
    if (!ShuffleVectorInst::isDeInterleaveMaskOfFactor(
            Shuffle->getShuffleMask(), Factor, Index))
      return false;

    assert(Shuffle->getShuffleMask().size() <= NumLoadElements);

    if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(0) == LI)
      Indices.push_back(Index);
    if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(1) == LI)
      Indices.push_back(Index);
  }

  // Try and modify users of the load that are extractelement instructions to
  // use the shufflevector instructions instead of the load.
  if (!tryReplaceExtracts(Extracts, Shuffles))
    return false;

  bool BinOpShuffleChanged =
      replaceBinOpShuffles(BinOpShuffles.getArrayRef(), Shuffles, LI);

  LLVM_DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");

  // Try to create target specific intrinsics to replace the load and shuffles.
  if (!TLI->lowerInterleavedLoad(LI, Shuffles, Indices, Factor)) {
    // If Extracts is not empty, tryReplaceExtracts made changes earlier.
    return !Extracts.empty() || BinOpShuffleChanged;
  }

  append_range(DeadInsts, Shuffles);

  DeadInsts.push_back(LI);
  return true;
}

bool InterleavedAccessImpl::replaceBinOpShuffles(
    ArrayRef<ShuffleVectorInst *> BinOpShuffles,
    SmallVectorImpl<ShuffleVectorInst *> &Shuffles, LoadInst *LI) {
  for (auto *SVI : BinOpShuffles) {
    BinaryOperator *BI = cast<BinaryOperator>(SVI->getOperand(0));
    Type *BIOp0Ty = BI->getOperand(0)->getType();
    ArrayRef<int> Mask = SVI->getShuffleMask();
    assert(all_of(Mask, [&](int Idx) {
      return Idx < (int)cast<FixedVectorType>(BIOp0Ty)->getNumElements();
    }));

    BasicBlock::iterator insertPos = SVI->getIterator();
    auto *NewSVI1 =
        new ShuffleVectorInst(BI->getOperand(0), PoisonValue::get(BIOp0Ty),
                              Mask, SVI->getName(), insertPos);
    auto *NewSVI2 = new ShuffleVectorInst(
        BI->getOperand(1), PoisonValue::get(BI->getOperand(1)->getType()), Mask,
        SVI->getName(), insertPos);
    BinaryOperator *NewBI = BinaryOperator::CreateWithCopiedFlags(
        BI->getOpcode(), NewSVI1, NewSVI2, BI, BI->getName(), insertPos);
    SVI->replaceAllUsesWith(NewBI);
    LLVM_DEBUG(dbgs() << "  Replaced: " << *BI << "\n    And   : " << *SVI
                      << "\n  With    : " << *NewSVI1 << "\n    And   : "
                      << *NewSVI2 << "\n    And   : " << *NewBI << "\n");
    RecursivelyDeleteTriviallyDeadInstructions(SVI);
    if (NewSVI1->getOperand(0) == LI)
      Shuffles.push_back(NewSVI1);
    if (NewSVI2->getOperand(0) == LI)
      Shuffles.push_back(NewSVI2);
  }

  return !BinOpShuffles.empty();
}

bool InterleavedAccessImpl::tryReplaceExtracts(
    ArrayRef<ExtractElementInst *> Extracts,
    ArrayRef<ShuffleVectorInst *> Shuffles) {
  // If there aren't any extractelement instructions to modify, there's nothing
  // to do.
  if (Extracts.empty())
    return true;

  // Maps extractelement instructions to vector-index pairs. The extractelement
  // instructions will be modified to use the new vector and index operands.
  DenseMap<ExtractElementInst *, std::pair<Value *, int>> ReplacementMap;

  for (auto *Extract : Extracts) {
    // The vector index that is extracted.
    auto *IndexOperand = cast<ConstantInt>(Extract->getIndexOperand());
    auto Index = IndexOperand->getSExtValue();

    // Look for a suitable shufflevector instruction. The goal is to modify the
    // extractelement instruction (which uses an interleaved load) to use one
    // of the shufflevector instructions instead of the load.
    for (auto *Shuffle : Shuffles) {
      // If the shufflevector instruction doesn't dominate the extract, we
      // can't create a use of it.
      if (!DT->dominates(Shuffle, Extract))
        continue;

      // Inspect the indices of the shufflevector instruction. If the shuffle
      // selects the same index that is extracted, we can modify the
      // extractelement instruction.
      SmallVector<int, 4> Indices;
      Shuffle->getShuffleMask(Indices);
      for (unsigned I = 0; I < Indices.size(); ++I)
        if (Indices[I] == Index) {
          assert(Extract->getOperand(0) == Shuffle->getOperand(0) &&
                 "Vector operations do not match");
          ReplacementMap[Extract] = std::make_pair(Shuffle, I);
          break;
        }

      // If we found a suitable shufflevector instruction, stop looking.
      if (ReplacementMap.count(Extract))
        break;
    }

    // If we did not find a suitable shufflevector instruction, the
    // extractelement instruction cannot be modified, so we must give up.
    if (!ReplacementMap.count(Extract))
      return false;
  }

  // Finally, perform the replacements.
  IRBuilder<> Builder(Extracts[0]->getContext());
  for (auto &Replacement : ReplacementMap) {
    auto *Extract = Replacement.first;
    auto *Vector = Replacement.second.first;
    auto Index = Replacement.second.second;
    Builder.SetInsertPoint(Extract);
    Extract->replaceAllUsesWith(Builder.CreateExtractElement(Vector, Index));
    Extract->eraseFromParent();
  }

  return true;
}

bool InterleavedAccessImpl::lowerInterleavedStore(
    StoreInst *SI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!SI->isSimple())
    return false;

  auto *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
  if (!SVI || !SVI->hasOneUse() || isa<ScalableVectorType>(SVI->getType()))
    return false;

  // Check if the shufflevector is a RE-interleave shuffle.
  unsigned Factor;
  if (!isReInterleaveMask(SVI, Factor, MaxFactor))
    return false;

  LLVM_DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");

  // Try to create target specific intrinsics to replace the store and shuffle.
  if (!TLI->lowerInterleavedStore(SI, SVI, Factor))
    return false;

  // Already have a new target specific interleaved store. Erase the old store.
  DeadInsts.push_back(SI);
  DeadInsts.push_back(SVI);
  return true;
}

bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
    IntrinsicInst *DI, SmallVector<Instruction *, 32> &DeadInsts) {
  LoadInst *LI = dyn_cast<LoadInst>(DI->getOperand(0));

  if (!LI || !LI->hasOneUse() || !LI->isSimple())
    return false;

  LLVM_DEBUG(dbgs() << "IA: Found a deinterleave intrinsic: " << *DI << "\n");

  // Try and match this with target specific intrinsics.
  if (!TLI->lowerDeinterleaveIntrinsicToLoad(DI, LI))
    return false;

  // We now have a target-specific load, so delete the old one.
  DeadInsts.push_back(DI);
  DeadInsts.push_back(LI);
  return true;
}

bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
    IntrinsicInst *II, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!II->hasOneUse())
    return false;

  StoreInst *SI = dyn_cast<StoreInst>(*(II->users().begin()));

  if (!SI || !SI->isSimple())
    return false;

  LLVM_DEBUG(dbgs() << "IA: Found an interleave intrinsic: " << *II << "\n");

  // Try and match this with target specific intrinsics.
  if (!TLI->lowerInterleaveIntrinsicToStore(II, SI))
    return false;

  // We now have a target-specific store, so delete the old one.
  DeadInsts.push_back(SI);
  DeadInsts.push_back(II);
  return true;
}

bool InterleavedAccessImpl::runOnFunction(Function &F) {
  // Holds dead instructions that will be erased later.
  SmallVector<Instruction *, 32> DeadInsts;
  bool Changed = false;

  for (auto &I : instructions(F)) {
    if (auto *LI = dyn_cast<LoadInst>(&I))
      Changed |= lowerInterleavedLoad(LI, DeadInsts);

    if (auto *SI = dyn_cast<StoreInst>(&I))
      Changed |= lowerInterleavedStore(SI, DeadInsts);

    if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
      // At present, we only have intrinsics to represent (de)interleaving
      // with a factor of 2.
      if (II->getIntrinsicID() == Intrinsic::vector_deinterleave2)
        Changed |= lowerDeinterleaveIntrinsic(II, DeadInsts);
      if (II->getIntrinsicID() == Intrinsic::vector_interleave2)
        Changed |= lowerInterleaveIntrinsic(II, DeadInsts);
    }
  }

  for (auto *I : DeadInsts)
    I->eraseFromParent();

  return Changed;
}