xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/VectorUtils.cpp (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
1 //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines vectorizer utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/VectorUtils.h"
14 #include "llvm/ADT/EquivalenceClasses.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/Analysis/DemandedBits.h"
17 #include "llvm/Analysis/LoopInfo.h"
18 #include "llvm/Analysis/LoopIterator.h"
19 #include "llvm/Analysis/ScalarEvolution.h"
20 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Value.h"
28 #include "llvm/Support/CommandLine.h"
29 
30 #define DEBUG_TYPE "vectorutils"
31 
32 using namespace llvm;
33 using namespace llvm::PatternMatch;
34 
35 /// Maximum factor for an interleaved memory access.
36 static cl::opt<unsigned> MaxInterleaveGroupFactor(
37     "max-interleave-group-factor", cl::Hidden,
38     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
39     cl::init(8));
40 
41 /// Return true if all of the intrinsic's arguments and return type are scalars
42 /// for the scalar form of the intrinsic, and vectors for the vector form of the
43 /// intrinsic (except operands that are marked as always being scalar by
44 /// isVectorIntrinsicWithScalarOpAtArg).
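/// As an illustration (not part of the original comment): Intrinsic::smax
/// qualifies, since its scalar form is declare i32 @llvm.smax.i32(i32, i32)
/// and a vector form is declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>).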
45 bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
46   switch (ID) {
47   case Intrinsic::abs:   // Begin integer bit-manipulation.
48   case Intrinsic::bswap:
49   case Intrinsic::bitreverse:
50   case Intrinsic::ctpop:
51   case Intrinsic::ctlz:
52   case Intrinsic::cttz:
53   case Intrinsic::fshl:
54   case Intrinsic::fshr:
55   case Intrinsic::smax:
56   case Intrinsic::smin:
57   case Intrinsic::umax:
58   case Intrinsic::umin:
59   case Intrinsic::sadd_sat:
60   case Intrinsic::ssub_sat:
61   case Intrinsic::uadd_sat:
62   case Intrinsic::usub_sat:
63   case Intrinsic::smul_fix:
64   case Intrinsic::smul_fix_sat:
65   case Intrinsic::umul_fix:
66   case Intrinsic::umul_fix_sat:
67   case Intrinsic::sqrt: // Begin floating-point.
68   case Intrinsic::sin:
69   case Intrinsic::cos:
70   case Intrinsic::exp:
71   case Intrinsic::exp2:
72   case Intrinsic::log:
73   case Intrinsic::log10:
74   case Intrinsic::log2:
75   case Intrinsic::fabs:
76   case Intrinsic::minnum:
77   case Intrinsic::maxnum:
78   case Intrinsic::minimum:
79   case Intrinsic::maximum:
80   case Intrinsic::copysign:
81   case Intrinsic::floor:
82   case Intrinsic::ceil:
83   case Intrinsic::trunc:
84   case Intrinsic::rint:
85   case Intrinsic::nearbyint:
86   case Intrinsic::round:
87   case Intrinsic::roundeven:
88   case Intrinsic::pow:
89   case Intrinsic::fma:
90   case Intrinsic::fmuladd:
91   case Intrinsic::is_fpclass:
92   case Intrinsic::powi:
93   case Intrinsic::canonicalize:
94   case Intrinsic::fptosi_sat:
95   case Intrinsic::fptoui_sat:
96   case Intrinsic::lrint:
97   case Intrinsic::llrint:
98     return true;
99   default:
100     return false;
101   }
102 }
103 
104 /// Identifies if the vector form of the intrinsic has a scalar operand.
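/// For example, for Intrinsic::powi the exponent (operand 1) stays scalar even
/// in the vector form, so this returns true only for ScalarOpdIdx == 1.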
105 bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
106                                               unsigned ScalarOpdIdx) {
107   switch (ID) {
108   case Intrinsic::abs:
109   case Intrinsic::ctlz:
110   case Intrinsic::cttz:
111   case Intrinsic::is_fpclass:
112   case Intrinsic::powi:
113     return (ScalarOpdIdx == 1);
114   case Intrinsic::smul_fix:
115   case Intrinsic::smul_fix_sat:
116   case Intrinsic::umul_fix:
117   case Intrinsic::umul_fix_sat:
118     return (ScalarOpdIdx == 2);
119   default:
120     return false;
121   }
122 }
123 
124 bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
125                                                   int OpdIdx) {
126   assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");
127 
128   switch (ID) {
129   case Intrinsic::fptosi_sat:
130   case Intrinsic::fptoui_sat:
131   case Intrinsic::lrint:
132   case Intrinsic::llrint:
133     return OpdIdx == -1 || OpdIdx == 0;
134   case Intrinsic::is_fpclass:
135     return OpdIdx == 0;
136   case Intrinsic::powi:
137     return OpdIdx == -1 || OpdIdx == 1;
138   default:
139     return OpdIdx == -1;
140   }
141 }
142 
143 /// Returns the intrinsic ID for the call.
144 /// For the given call instruction it finds the corresponding intrinsic and
145 /// returns its ID; if no such intrinsic is found, it returns not_intrinsic.
146 Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
147                                                 const TargetLibraryInfo *TLI) {
148   Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
149   if (ID == Intrinsic::not_intrinsic)
150     return Intrinsic::not_intrinsic;
151 
152   if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
153       ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
154       ID == Intrinsic::experimental_noalias_scope_decl ||
155       ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
156     return ID;
157   return Intrinsic::not_intrinsic;
158 }
159 
160 /// Given a vector and an element number, see if the scalar value is
161 /// already around as a register, for example if it were inserted then extracted
162 /// from the vector.
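/// For example (illustrative IR), given
///   %v = insertelement <4 x i32> %vec, i32 %x, i32 1
/// a query for element 1 of %v returns %x.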
163 Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
164   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
165   VectorType *VTy = cast<VectorType>(V->getType());
166   // For a fixed-length vector, return undef for an out-of-range access.
167   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
168     unsigned Width = FVTy->getNumElements();
169     if (EltNo >= Width)
170       return UndefValue::get(FVTy->getElementType());
171   }
172 
173   if (Constant *C = dyn_cast<Constant>(V))
174     return C->getAggregateElement(EltNo);
175 
176   if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
177     // If this is an insert to a variable element, we don't know what it is.
178     if (!isa<ConstantInt>(III->getOperand(2)))
179       return nullptr;
180     unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
181 
182     // If this is an insert to the element we are looking for, return the
183     // inserted value.
184     if (EltNo == IIElt)
185       return III->getOperand(1);
186 
187     // Guard against infinite loop on malformed, unreachable IR.
188     if (III == III->getOperand(0))
189       return nullptr;
190 
191     // Otherwise, the insertelement doesn't modify the value, recurse on its
192     // vector input.
193     return findScalarElement(III->getOperand(0), EltNo);
194   }
195 
196   ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
197   // Restrict the following transformation to fixed-length vectors.
198   if (SVI && isa<FixedVectorType>(SVI->getType())) {
199     unsigned LHSWidth =
200         cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
201     int InEl = SVI->getMaskValue(EltNo);
202     if (InEl < 0)
203       return UndefValue::get(VTy->getElementType());
204     if (InEl < (int)LHSWidth)
205       return findScalarElement(SVI->getOperand(0), InEl);
206     return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
207   }
208 
209   // Extract a value from a vector add operation with a constant zero.
210   // TODO: Use getBinOpIdentity() to generalize this.
211   Value *Val; Constant *C;
212   if (match(V, m_Add(m_Value(Val), m_Constant(C))))
213     if (Constant *Elt = C->getAggregateElement(EltNo))
214       if (Elt->isNullValue())
215         return findScalarElement(Val, EltNo);
216 
217   // If the vector is a splat then we can trivially find the scalar element.
218   if (isa<ScalableVectorType>(VTy))
219     if (Value *Splat = getSplatValue(V))
220       if (EltNo < VTy->getElementCount().getKnownMinValue())
221         return Splat;
222 
223   // Otherwise, we don't know.
224   return nullptr;
225 }
226 
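/// As an illustration: the mask <3, -1, 3, 3> yields 3, while <0, 1, 0, 1>
/// yields -1 because two different source indices are selected. Undefined (-1)
/// mask elements are ignored.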
227 int llvm::getSplatIndex(ArrayRef<int> Mask) {
228   int SplatIndex = -1;
229   for (int M : Mask) {
230     // Ignore invalid (undefined) mask elements.
231     if (M < 0)
232       continue;
233 
234     // There can be only 1 non-negative mask element value if this is a splat.
235     if (SplatIndex != -1 && SplatIndex != M)
236       return -1;
237 
238     // Initialize the splat index to the 1st non-negative mask element.
239     SplatIndex = M;
240   }
241   assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
242   return SplatIndex;
243 }
244 
245 /// Get splat value if the input is a splat vector or return nullptr.
246 /// This function is not fully general. It checks only 2 cases:
247 /// the input value is (1) a splat constant vector or (2) a sequence
248 /// of instructions that broadcasts a scalar at element 0.
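/// For example (illustrative IR), the broadcast idiom
///   %ins   = insertelement <4 x float> poison, float %s, i64 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
///                          <4 x i32> zeroinitializer
/// yields %s.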
249 Value *llvm::getSplatValue(const Value *V) {
250   if (isa<VectorType>(V->getType()))
251     if (auto *C = dyn_cast<Constant>(V))
252       return C->getSplatValue();
253 
254   // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
255   Value *Splat;
256   if (match(V,
257             m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
258                       m_Value(), m_ZeroMask())))
259     return Splat;
260 
261   return nullptr;
262 }
263 
264 bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
265   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
266 
267   if (isa<VectorType>(V->getType())) {
268     if (isa<UndefValue>(V))
269       return true;
270     // FIXME: We can allow undefs, but if Index was specified, we may want to
271     //        check that the constant is defined at that index.
272     if (auto *C = dyn_cast<Constant>(V))
273       return C->getSplatValue() != nullptr;
274   }
275 
276   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
277     // FIXME: We can safely allow undefs here. If Index was specified, we will
278     //        check that the mask elt is defined at the required index.
279     if (!all_equal(Shuf->getShuffleMask()))
280       return false;
281 
282     // Match any index.
283     if (Index == -1)
284       return true;
285 
286     // Match a specific element. The mask should be defined at and match the
287     // specified index.
288     return Shuf->getMaskValue(Index) == Index;
289   }
290 
291   // The remaining tests are all recursive, so bail out if we hit the limit.
292   if (Depth++ == MaxAnalysisRecursionDepth)
293     return false;
294 
295   // If both operands of a binop are splats, the result is a splat.
296   Value *X, *Y, *Z;
297   if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
298     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);
299 
300   // If all operands of a select are splats, the result is a splat.
301   if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
302     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
303            isSplatValue(Z, Index, Depth);
304 
305   // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).
306 
307   return false;
308 }
309 
310 bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
311                                   const APInt &DemandedElts, APInt &DemandedLHS,
312                                   APInt &DemandedRHS, bool AllowUndefElts) {
313   DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);
314 
315   // Early out if we don't demand any elements.
316   if (DemandedElts.isZero())
317     return true;
318 
319   // Simple case of a shuffle with zeroinitializer.
320   if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
321     DemandedLHS.setBit(0);
322     return true;
323   }
324 
325   for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
326     int M = Mask[I];
327     assert((-1 <= M) && (M < (SrcWidth * 2)) &&
328            "Invalid shuffle mask constant");
329 
330     if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
331       continue;
332 
333     // For undef elements, we don't know anything about the common state of
334     // the shuffle result.
335     if (M < 0)
336       return false;
337 
338     if (M < SrcWidth)
339       DemandedLHS.setBit(M);
340     else
341       DemandedRHS.setBit(M - SrcWidth);
342   }
343 
344   return true;
345 }
346 
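/// As an illustration: narrowing the mask <1, 0> by Scale = 2 (i.e. moving to
/// elements half as wide) produces <2, 3, 0, 1>; negative (undef/sentinel)
/// elements are simply replicated Scale times.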
347 void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
348                                  SmallVectorImpl<int> &ScaledMask) {
349   assert(Scale > 0 && "Unexpected scaling factor");
350 
351   // Fast-path: if no scaling, then it is just a copy.
352   if (Scale == 1) {
353     ScaledMask.assign(Mask.begin(), Mask.end());
354     return;
355   }
356 
357   ScaledMask.clear();
358   for (int MaskElt : Mask) {
359     if (MaskElt >= 0) {
360       assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
361              "Overflowed 32-bits");
362     }
363     for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
364       ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
365   }
366 }
367 
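/// As an illustration: widening the mask <2, 3, 0, 1> by Scale = 2 succeeds
/// and produces <1, 0>, whereas <0, 2, 1, 3> cannot be widened because the
/// elements of each 2-wide slice are not consecutive.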
368 bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
369                                 SmallVectorImpl<int> &ScaledMask) {
370   assert(Scale > 0 && "Unexpected scaling factor");
371 
372   // Fast-path: if no scaling, then it is just a copy.
373   if (Scale == 1) {
374     ScaledMask.assign(Mask.begin(), Mask.end());
375     return true;
376   }
377 
378   // We must map the original elements down evenly to a type with fewer elements.
379   int NumElts = Mask.size();
380   if (NumElts % Scale != 0)
381     return false;
382 
383   ScaledMask.clear();
384   ScaledMask.reserve(NumElts / Scale);
385 
386   // Step through the input mask by splitting into Scale-sized slices.
387   do {
388     ArrayRef<int> MaskSlice = Mask.take_front(Scale);
389     assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");
390 
391     // The first element of the slice determines how we evaluate this slice.
392     int SliceFront = MaskSlice.front();
393     if (SliceFront < 0) {
394       // Negative values (undef or other "sentinel" values) must be equal across
395       // the entire slice.
396       if (!all_equal(MaskSlice))
397         return false;
398       ScaledMask.push_back(SliceFront);
399     } else {
400       // A positive mask element must be cleanly divisible.
401       if (SliceFront % Scale != 0)
402         return false;
403       // Elements of the slice must be consecutive.
404       for (int i = 1; i < Scale; ++i)
405         if (MaskSlice[i] != SliceFront + i)
406           return false;
407       ScaledMask.push_back(SliceFront / Scale);
408     }
409     Mask = Mask.drop_front(Scale);
410   } while (!Mask.empty());
411 
412   assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");
413 
414   // All elements of the original mask can be scaled down to map to the elements
415   // of a mask with wider elements.
416   return true;
417 }
418 
419 void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
420                                         SmallVectorImpl<int> &ScaledMask) {
421   std::array<SmallVector<int, 16>, 2> TmpMasks;
422   SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
423   ArrayRef<int> InputMask = Mask;
424   for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
425     while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
426       InputMask = *Output;
427       std::swap(Output, Tmp);
428     }
429   }
430   ScaledMask.assign(InputMask.begin(), InputMask.end());
431 }
432 
433 void llvm::processShuffleMasks(
434     ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
435     unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
436     function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
437     function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
438   SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
439   // Try to perform better estimation of the permutation.
440   // 1. Split the source/destination vectors into real registers.
441   // 2. Do the mask analysis to identify which real registers are
442   // permuted.
443   int Sz = Mask.size();
444   unsigned SzDest = Sz / NumOfDestRegs;
445   unsigned SzSrc = Sz / NumOfSrcRegs;
446   for (unsigned I = 0; I < NumOfDestRegs; ++I) {
447     auto &RegMasks = Res[I];
448     RegMasks.assign(NumOfSrcRegs, {});
449     // Record which source register each value in this dest register comes
450     // from.
451     for (unsigned K = 0; K < SzDest; ++K) {
452       int Idx = I * SzDest + K;
453       if (Idx == Sz)
454         break;
455       if (Mask[Idx] >= Sz || Mask[Idx] == PoisonMaskElem)
456         continue;
457       int SrcRegIdx = Mask[Idx] / SzSrc;
458       // Add a cost of PermuteTwoSrc for each new source register permute,
459       // if we have more than one source register.
460       if (RegMasks[SrcRegIdx].empty())
461         RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
462       RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
463     }
464   }
465   // Process split mask.
466   for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
467     auto &Dest = Res[I];
468     int NumSrcRegs =
469         count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
470     switch (NumSrcRegs) {
471     case 0:
472       // No input vectors were used!
473       NoInputAction();
474       break;
475     case 1: {
476       // Find the single non-empty source mask (exactly one source register is used).
477       auto *It =
478           find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
479       unsigned SrcReg = std::distance(Dest.begin(), It);
480       SingleInputAction(*It, SrcReg, I);
481       break;
482     }
483     default: {
484       // The first mask is a permutation of a single register. Since we have two
485       // or more input registers to shuffle, we merge the masks for the first two registers
486       // and generate a shuffle of 2 registers rather than the reordering of the
487       // first register and then shuffle with the second register. Next,
488       // generate the shuffles of the resulting register + the remaining
489       // registers from the list.
490       auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
491                                ArrayRef<int> SecondMask) {
492         for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
493           if (SecondMask[Idx] != PoisonMaskElem) {
494             assert(FirstMask[Idx] == PoisonMaskElem &&
495                    "Expected undefined mask element.");
496             FirstMask[Idx] = SecondMask[Idx] + VF;
497           }
498         }
499       };
500       auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
501         for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
502           if (Mask[Idx] != PoisonMaskElem)
503             Mask[Idx] = Idx;
504         }
505       };
506       int SecondIdx;
507       do {
508         int FirstIdx = -1;
509         SecondIdx = -1;
510         MutableArrayRef<int> FirstMask, SecondMask;
511         for (unsigned I = 0; I < NumOfDestRegs; ++I) {
512           SmallVectorImpl<int> &RegMask = Dest[I];
513           if (RegMask.empty())
514             continue;
515 
516           if (FirstIdx == SecondIdx) {
517             FirstIdx = I;
518             FirstMask = RegMask;
519             continue;
520           }
521           SecondIdx = I;
522           SecondMask = RegMask;
523           CombineMasks(FirstMask, SecondMask);
524           ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
525           NormalizeMask(FirstMask);
526           RegMask.clear();
527           SecondMask = FirstMask;
528           SecondIdx = FirstIdx;
529         }
530         if (FirstIdx != SecondIdx && SecondIdx >= 0) {
531           CombineMasks(SecondMask, FirstMask);
532           ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
533           Dest[FirstIdx].clear();
534           NormalizeMask(SecondMask);
535         }
536       } while (SecondIdx >= 0);
537       break;
538     }
539     }
540   }
541 }
542 
543 MapVector<Instruction *, uint64_t>
544 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
545                                const TargetTransformInfo *TTI) {
546 
547   // DemandedBits will give us every value's live-out bits. But we want
548   // to ensure no extra casts would need to be inserted, so every DAG
549   // of connected values must have the same minimum bitwidth.
550   EquivalenceClasses<Value *> ECs;
551   SmallVector<Value *, 16> Worklist;
552   SmallPtrSet<Value *, 4> Roots;
553   SmallPtrSet<Value *, 16> Visited;
554   DenseMap<Value *, uint64_t> DBits;
555   SmallPtrSet<Instruction *, 4> InstructionSet;
556   MapVector<Instruction *, uint64_t> MinBWs;
557 
558   // Determine the roots. We work bottom-up, from truncs or icmps.
559   bool SeenExtFromIllegalType = false;
560   for (auto *BB : Blocks)
561     for (auto &I : *BB) {
562       InstructionSet.insert(&I);
563 
564       if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
565           !TTI->isTypeLegal(I.getOperand(0)->getType()))
566         SeenExtFromIllegalType = true;
567 
568       // Only deal with non-vector integers up to 64 bits wide.
569       if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
570           !I.getType()->isVectorTy() &&
571           I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
572         // Don't make work for ourselves. If we know the loaded type is legal,
573         // don't add it to the worklist.
574         if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
575           continue;
576 
577         Worklist.push_back(&I);
578         Roots.insert(&I);
579       }
580     }
581   // Early exit.
582   if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
583     return MinBWs;
584 
585   // Now proceed breadth-first, unioning values together.
586   while (!Worklist.empty()) {
587     Value *Val = Worklist.pop_back_val();
588     Value *Leader = ECs.getOrInsertLeaderValue(Val);
589 
590     if (!Visited.insert(Val).second)
591       continue;
592 
593     // Non-instructions terminate a chain successfully.
594     if (!isa<Instruction>(Val))
595       continue;
596     Instruction *I = cast<Instruction>(Val);
597 
598     // If we encounter a type that is larger than 64 bits, we can't represent
599     // it so bail out.
600     if (DB.getDemandedBits(I).getBitWidth() > 64)
601       return MapVector<Instruction *, uint64_t>();
602 
603     uint64_t V = DB.getDemandedBits(I).getZExtValue();
604     DBits[Leader] |= V;
605     DBits[I] = V;
606 
607     // Casts, loads and instructions outside of our range terminate a chain
608     // successfully.
609     if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
610         !InstructionSet.count(I))
611       continue;
612 
613     // Unsafe casts terminate a chain unsuccessfully. We can't do anything
614     // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
615     // transform anything that relies on them.
616     if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
617         !I->getType()->isIntegerTy()) {
618       DBits[Leader] |= ~0ULL;
619       continue;
620     }
621 
622     // We don't modify the types of PHIs. Reductions will already have been
623     // truncated if possible, and inductions' sizes will have been chosen by
624     // indvars.
625     if (isa<PHINode>(I))
626       continue;
627 
628     if (DBits[Leader] == ~0ULL)
629       // All bits demanded, no point continuing.
630       continue;
631 
632     for (Value *O : cast<User>(I)->operands()) {
633       ECs.unionSets(Leader, O);
634       Worklist.push_back(O);
635     }
636   }
637 
638   // Now we've discovered all values, walk them to see if there are
639   // any users we didn't see. If there are, we can't optimize that
640   // chain.
641   for (auto &I : DBits)
642     for (auto *U : I.first->users())
643       if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
644         DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;
645 
646   for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
647     uint64_t LeaderDemandedBits = 0;
648     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
649       LeaderDemandedBits |= DBits[M];
650 
651     uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
652     // Round up to a power of 2
653     MinBW = llvm::bit_ceil(MinBW);
654 
655     // We don't modify the types of PHIs. Reductions will already have been
656     // truncated if possible, and inductions' sizes will have been chosen by
657     // indvars.
658     // If we are required to shrink a PHI, abandon this entire equivalence class.
659     bool Abort = false;
660     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
661       if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
662         Abort = true;
663         break;
664       }
665     if (Abort)
666       continue;
667 
668     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
669       auto *MI = dyn_cast<Instruction>(M);
670       if (!MI)
671         continue;
672       Type *Ty = M->getType();
673       if (Roots.count(M))
674         Ty = MI->getOperand(0)->getType();
675 
676       if (MinBW >= Ty->getScalarSizeInBits())
677         continue;
678 
679       // If any of M's operands demand more bits than MinBW then M cannot be
680       // performed safely in MinBW.
681       if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
682             auto *CI = dyn_cast<ConstantInt>(U);
683             // For constant shift amounts, check if the shift would result in
684             // poison.
685             if (CI &&
686                 isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
687                 U.getOperandNo() == 1)
688               return CI->uge(MinBW);
689             uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
690             return bit_ceil(BW) > MinBW;
691           }))
692         continue;
693 
694       MinBWs[MI] = MinBW;
695     }
696   }
697 
698   return MinBWs;
699 }
700 
701 /// Add all access groups in @p AccGroups to @p List.
702 template <typename ListT>
703 static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
704   // Interpret an access group as a list containing itself.
705   if (AccGroups->getNumOperands() == 0) {
706     assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
707     List.insert(AccGroups);
708     return;
709   }
710 
711   for (const auto &AccGroupListOp : AccGroups->operands()) {
712     auto *Item = cast<MDNode>(AccGroupListOp.get());
713     assert(isValidAsAccessGroup(Item) && "List item must be an access group");
714     List.insert(Item);
715   }
716 }
717 
718 MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
719   if (!AccGroups1)
720     return AccGroups2;
721   if (!AccGroups2)
722     return AccGroups1;
723   if (AccGroups1 == AccGroups2)
724     return AccGroups1;
725 
726   SmallSetVector<Metadata *, 4> Union;
727   addToAccessGroupList(Union, AccGroups1);
728   addToAccessGroupList(Union, AccGroups2);
729 
730   if (Union.size() == 0)
731     return nullptr;
732   if (Union.size() == 1)
733     return cast<MDNode>(Union.front());
734 
735   LLVMContext &Ctx = AccGroups1->getContext();
736   return MDNode::get(Ctx, Union.getArrayRef());
737 }
738 
739 MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
740                                     const Instruction *Inst2) {
741   bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
742   bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();
743 
744   if (!MayAccessMem1 && !MayAccessMem2)
745     return nullptr;
746   if (!MayAccessMem1)
747     return Inst2->getMetadata(LLVMContext::MD_access_group);
748   if (!MayAccessMem2)
749     return Inst1->getMetadata(LLVMContext::MD_access_group);
750 
751   MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
752   MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
753   if (!MD1 || !MD2)
754     return nullptr;
755   if (MD1 == MD2)
756     return MD1;
757 
758   // Use set for scalable 'contains' check.
759   SmallPtrSet<Metadata *, 4> AccGroupSet2;
760   addToAccessGroupList(AccGroupSet2, MD2);
761 
762   SmallVector<Metadata *, 4> Intersection;
763   if (MD1->getNumOperands() == 0) {
764     assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
765     if (AccGroupSet2.count(MD1))
766       Intersection.push_back(MD1);
767   } else {
768     for (const MDOperand &Node : MD1->operands()) {
769       auto *Item = cast<MDNode>(Node.get());
770       assert(isValidAsAccessGroup(Item) && "List item must be an access group");
771       if (AccGroupSet2.count(Item))
772         Intersection.push_back(Item);
773     }
774   }
775 
776   if (Intersection.size() == 0)
777     return nullptr;
778   if (Intersection.size() == 1)
779     return cast<MDNode>(Intersection.front());
780 
781   LLVMContext &Ctx = Inst1->getContext();
782   return MDNode::get(Ctx, Intersection);
783 }
784 
785 /// \returns \p I after propagating metadata from \p VL.
786 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
787   if (VL.empty())
788     return Inst;
789   Instruction *I0 = cast<Instruction>(VL[0]);
790   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
791   I0->getAllMetadataOtherThanDebugLoc(Metadata);
792 
793   for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
794                     LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
795                     LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
796                     LLVMContext::MD_access_group}) {
797     MDNode *MD = I0->getMetadata(Kind);
798 
799     for (int J = 1, E = VL.size(); MD && J != E; ++J) {
800       const Instruction *IJ = cast<Instruction>(VL[J]);
801       MDNode *IMD = IJ->getMetadata(Kind);
802       switch (Kind) {
803       case LLVMContext::MD_tbaa:
804         MD = MDNode::getMostGenericTBAA(MD, IMD);
805         break;
806       case LLVMContext::MD_alias_scope:
807         MD = MDNode::getMostGenericAliasScope(MD, IMD);
808         break;
809       case LLVMContext::MD_fpmath:
810         MD = MDNode::getMostGenericFPMath(MD, IMD);
811         break;
812       case LLVMContext::MD_noalias:
813       case LLVMContext::MD_nontemporal:
814       case LLVMContext::MD_invariant_load:
815         MD = MDNode::intersect(MD, IMD);
816         break;
817       case LLVMContext::MD_access_group:
818         MD = intersectAccessGroups(Inst, IJ);
819         break;
820       default:
821         llvm_unreachable("unhandled metadata");
822       }
823     }
824 
825     Inst->setMetadata(Kind, MD);
826   }
827 
828   return Inst;
829 }
830 
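/// For example, a group with factor 4 whose only members sit at indices 0 and
/// 2 yields, for VF = 2, the i1 gap mask <1, 0, 1, 0, 1, 0, 1, 0>.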
831 Constant *
832 llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
833                            const InterleaveGroup<Instruction> &Group) {
834   // All 1's means mask is not needed.
835   if (Group.getNumMembers() == Group.getFactor())
836     return nullptr;
837 
838   // TODO: support reversed access.
839   assert(!Group.isReverse() && "Reversed group not supported.");
840 
841   SmallVector<Constant *, 16> Mask;
842   for (unsigned i = 0; i < VF; i++)
843     for (unsigned j = 0; j < Group.getFactor(); ++j) {
844       unsigned HasMember = Group.getMember(j) ? 1 : 0;
845       Mask.push_back(Builder.getInt1(HasMember));
846     }
847 
848   return ConstantVector::get(Mask);
849 }
850 
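/// For example, ReplicationFactor = 3 and VF = 2 produce the mask
/// <0, 0, 0, 1, 1, 1>.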
851 llvm::SmallVector<int, 16>
852 llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
853   SmallVector<int, 16> MaskVec;
854   for (unsigned i = 0; i < VF; i++)
855     for (unsigned j = 0; j < ReplicationFactor; j++)
856       MaskVec.push_back(i);
857 
858   return MaskVec;
859 }
860 
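/// For example, VF = 4 and NumVecs = 2 produce the mask
/// <0, 4, 1, 5, 2, 6, 3, 7>.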
861 llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
862                                                       unsigned NumVecs) {
863   SmallVector<int, 16> Mask;
864   for (unsigned i = 0; i < VF; i++)
865     for (unsigned j = 0; j < NumVecs; j++)
866       Mask.push_back(j * VF + i);
867 
868   return Mask;
869 }
870 
871 llvm::SmallVector<int, 16>
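/// For example, Start = 0, Stride = 2 and VF = 4 produce the mask
/// <0, 2, 4, 6>.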
872 llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
873   SmallVector<int, 16> Mask;
874   for (unsigned i = 0; i < VF; i++)
875     Mask.push_back(Start + i * Stride);
876 
877   return Mask;
878 }
879 
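/// For example, Start = 0, NumInts = 4 and NumUndefs = 4 produce the mask
/// <0, 1, 2, 3, undef, undef, undef, undef>.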
880 llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
881                                                       unsigned NumInts,
882                                                       unsigned NumUndefs) {
883   SmallVector<int, 16> Mask;
884   for (unsigned i = 0; i < NumInts; i++)
885     Mask.push_back(Start + i);
886 
887   for (unsigned i = 0; i < NumUndefs; i++)
888     Mask.push_back(-1);
889 
890   return Mask;
891 }
892 
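/// For example, with NumElts = 4 the two-source mask <0, 5, 2, 7> becomes the
/// single-source mask <0, 1, 2, 3>.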
893 llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
894                                                  unsigned NumElts) {
895   // Avoid casts in the loop and make sure we have a reasonable number.
896   int NumEltsSigned = NumElts;
897   assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");
898 
899   // If the mask chooses an element from operand 1, reduce it to choose from the
900   // corresponding element of operand 0. Undef mask elements are unchanged.
901   SmallVector<int, 16> UnaryMask;
902   for (int MaskElt : Mask) {
903     assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
904     int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
905     UnaryMask.push_back(UnaryElt);
906   }
907   return UnaryMask;
908 }
909 
910 /// A helper function for concatenating vectors. This function concatenates two
911 /// vectors having the same element type. If the second vector has fewer
912 /// elements than the first, it is padded with undefs.
913 static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
914                                     Value *V2) {
915   VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
916   VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
917   assert(VecTy1 && VecTy2 &&
918          VecTy1->getScalarType() == VecTy2->getScalarType() &&
919          "Expect two vectors with the same element type");
920 
921   unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
922   unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
923   assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements");
924 
925   if (NumElts1 > NumElts2) {
926     // Extend with UNDEFs.
927     V2 = Builder.CreateShuffleVector(
928         V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
929   }
930 
931   return Builder.CreateShuffleVector(
932       V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
933 }
934 
935 Value *llvm::concatenateVectors(IRBuilderBase &Builder,
936                                 ArrayRef<Value *> Vecs) {
937   unsigned NumVecs = Vecs.size();
938   assert(NumVecs > 1 && "Should be at least two vectors");
939 
940   SmallVector<Value *, 8> ResList;
941   ResList.append(Vecs.begin(), Vecs.end());
942   do {
943     SmallVector<Value *, 8> TmpList;
944     for (unsigned i = 0; i < NumVecs - 1; i += 2) {
945       Value *V0 = ResList[i], *V1 = ResList[i + 1];
946       assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
947              "Only the last vector may have a different type");
948 
949       TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
950     }
951 
952     // Push the last vector if the total number of vectors is odd.
953     if (NumVecs % 2 != 0)
954       TmpList.push_back(ResList[NumVecs - 1]);
955 
956     ResList = TmpList;
957     NumVecs = ResList.size();
958   } while (NumVecs > 1);
959 
960   return ResList[0];
961 }
962 
963 bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
964   assert(isa<VectorType>(Mask->getType()) &&
965          isa<IntegerType>(Mask->getType()->getScalarType()) &&
966          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
967              1 &&
968          "Mask must be a vector of i1");
969 
970   auto *ConstMask = dyn_cast<Constant>(Mask);
971   if (!ConstMask)
972     return false;
973   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
974     return true;
975   if (isa<ScalableVectorType>(ConstMask->getType()))
976     return false;
977   for (unsigned
978            I = 0,
979            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
980        I != E; ++I) {
981     if (auto *MaskElt = ConstMask->getAggregateElement(I))
982       if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
983         continue;
984     return false;
985   }
986   return true;
987 }
988 
989 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
990   assert(isa<VectorType>(Mask->getType()) &&
991          isa<IntegerType>(Mask->getType()->getScalarType()) &&
992          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
993              1 &&
994          "Mask must be a vector of i1");
995 
996   auto *ConstMask = dyn_cast<Constant>(Mask);
997   if (!ConstMask)
998     return false;
999   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1000     return true;
1001   if (isa<ScalableVectorType>(ConstMask->getType()))
1002     return false;
1003   for (unsigned
1004            I = 0,
1005            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1006        I != E; ++I) {
1007     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1008       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1009         continue;
1010     return false;
1011   }
1012   return true;
1013 }
1014 
1015 /// TODO: This is a lot like known bits, but for
1016 /// vectors.  Is there something we can common this with?
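/// As an illustration: the constant mask <i1 1, i1 0, i1 1, i1 1> yields the
/// demanded-elements bitmask 0b1101 (element 1 is known not to be accessed).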
1017 APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
1018   assert(isa<FixedVectorType>(Mask->getType()) &&
1019          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1020          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1021              1 &&
1022          "Mask must be a fixed width vector of i1");
1023 
1024   const unsigned VWidth =
1025       cast<FixedVectorType>(Mask->getType())->getNumElements();
1026   APInt DemandedElts = APInt::getAllOnes(VWidth);
1027   if (auto *CV = dyn_cast<ConstantVector>(Mask))
1028     for (unsigned i = 0; i < VWidth; i++)
1029       if (CV->getAggregateElement(i)->isNullValue())
1030         DemandedElts.clearBit(i);
1031   return DemandedElts;
1032 }
1033 
1034 bool InterleavedAccessInfo::isStrided(int Stride) {
1035   unsigned Factor = std::abs(Stride);
1036   return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1037 }
1038 
1039 void InterleavedAccessInfo::collectConstStrideAccesses(
1040     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1041     const DenseMap<Value*, const SCEV*> &Strides) {
1042   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
1043 
1044   // Since it's desired that the load/store instructions be maintained in
1045   // "program order" for the interleaved access analysis, we have to visit the
1046   // blocks in the loop in reverse postorder (i.e., in a topological order).
1047   // Such an ordering will ensure that any load/store that may be executed
1048   // before a second load/store will precede the second load/store in
1049   // AccessStrideInfo.
1050   LoopBlocksDFS DFS(TheLoop);
1051   DFS.perform(LI);
1052   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
1053     for (auto &I : *BB) {
1054       Value *Ptr = getLoadStorePointerOperand(&I);
1055       if (!Ptr)
1056         continue;
1057       Type *ElementTy = getLoadStoreType(&I);
1058 
1059       // Currently, codegen doesn't support cases where the type size doesn't
1060       // match the alloc size. Skip them for now.
1061       uint64_t Size = DL.getTypeAllocSize(ElementTy);
1062       if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
1063         continue;
1064 
1065       // We don't check wrapping here because we don't know yet if Ptr will be
1066       // part of a full group or a group with gaps. Checking wrapping for all
1067       // pointers (even those that end up in groups with no gaps) will be overly
1068       // conservative. For full groups, wrapping should be ok since if we would
1069       // wrap around the address space we would do a memory access at nullptr
1070       // even without the transformation. The wrapping checks are therefore
1071       // deferred until after we've formed the interleaved groups.
1072       int64_t Stride =
1073         getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
1074                      /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);
1075 
1076       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
1077       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
1078                                               getLoadStoreAlignment(&I));
1079     }
1080 }
1081 
1082 // Analyze interleaved accesses and collect them into interleaved load and
1083 // store groups.
1084 //
1085 // When generating code for an interleaved load group, we effectively hoist all
1086 // loads in the group to the location of the first load in program order. When
1087 // generating code for an interleaved store group, we sink all stores to the
1088 // location of the last store. This code motion can change the order of load
1089 // and store instructions and may break dependences.
1090 //
1091 // The code generation strategy mentioned above ensures that we won't violate
1092 // any write-after-read (WAR) dependences.
1093 //
1094 // E.g., for the WAR dependence:  a = A[i];      // (1)
1095 //                                A[i] = b;      // (2)
1096 //
1097 // The store group of (2) is always inserted at or below (2), and the load
1098 // group of (1) is always inserted at or above (1). Thus, the instructions will
1099 // never be reordered. All other dependences are checked to ensure the
1100 // correctness of the instruction reordering.
1101 //
1102 // The algorithm visits all memory accesses in the loop in bottom-up program
1103 // order. Program order is established by traversing the blocks in the loop in
1104 // reverse postorder when collecting the accesses.
1105 //
1106 // We visit the memory accesses in bottom-up order because it can simplify the
1107 // construction of store groups in the presence of write-after-write (WAW)
1108 // dependences.
1109 //
1110 // E.g., for the WAW dependence:  A[i] = a;      // (1)
1111 //                                A[i] = b;      // (2)
1112 //                                A[i + 1] = c;  // (3)
1113 //
1114 // We will first create a store group with (3) and (2). (1) can't be added to
1115 // this group because it and (2) are dependent. However, (1) can be grouped
1116 // with other accesses that may precede it in program order. Note that a
1117 // bottom-up order does not imply that WAW dependences should not be checked.
1118 void InterleavedAccessInfo::analyzeInterleaving(
1119                                  bool EnablePredicatedInterleavedMemAccesses) {
1120   LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
1121   const auto &Strides = LAI->getSymbolicStrides();
1122 
1123   // Holds all accesses with a constant stride.
1124   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
1125   collectConstStrideAccesses(AccessStrideInfo, Strides);
1126 
1127   if (AccessStrideInfo.empty())
1128     return;
1129 
1130   // Collect the dependences in the loop.
1131   collectDependences();
1132 
1133   // Holds all interleaved store groups temporarily.
1134   SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
1135   // Holds all interleaved load groups temporarily.
1136   SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
1137   // Groups added to this set cannot have new members added.
1138   SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;
1139 
1140   // Search in bottom-up program order for pairs of accesses (A and B) that can
1141   // form interleaved load or store groups. In the algorithm below, access A
1142   // precedes access B in program order. We initialize a group for B in the
1143   // outer loop of the algorithm, and then in the inner loop, we attempt to
1144   // insert each A into B's group if:
1145   //
1146   //  1. A and B have the same stride,
1147   //  2. A and B have the same memory object size, and
1148   //  3. A belongs in B's group according to its distance from B.
1149   //
1150   // Special care is taken to ensure group formation will not break any
1151   // dependences.
1152   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
1153        BI != E; ++BI) {
1154     Instruction *B = BI->first;
1155     StrideDescriptor DesB = BI->second;
1156 
1157     // Initialize a group for B if it has an allowable stride. Even if we don't
1158     // create a group for B, we continue with the bottom-up algorithm to ensure
1159     // we don't break any of B's dependences.
1160     InterleaveGroup<Instruction> *GroupB = nullptr;
1161     if (isStrided(DesB.Stride) &&
1162         (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
1163       GroupB = getInterleaveGroup(B);
1164       if (!GroupB) {
1165         LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
1166                           << '\n');
1167         GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
1168         if (B->mayWriteToMemory())
1169           StoreGroups.insert(GroupB);
1170         else
1171           LoadGroups.insert(GroupB);
1172       }
1173     }
1174 
1175     for (auto AI = std::next(BI); AI != E; ++AI) {
1176       Instruction *A = AI->first;
1177       StrideDescriptor DesA = AI->second;
1178 
1179       // Our code motion strategy implies that we can't have dependences
1180       // between accesses in an interleaved group and other accesses located
1181       // between the first and last member of the group. Note that this also
1182       // means that a group can't have more than one member at a given offset.
1183       // The accesses in a group can have dependences with other accesses, but
1184       // we must ensure we don't extend the boundaries of the group such that
1185       // we encompass those dependent accesses.
1186       //
1187       // For example, assume we have the sequence of accesses shown below in a
1188       // stride-2 loop:
1189       //
1190       //  (1, 2) is a group | A[i]   = a;  // (1)
1191       //                    | A[i-1] = b;  // (2) |
1192       //                      A[i-3] = c;  // (3)
1193       //                      A[i]   = d;  // (4) | (2, 4) is not a group
1194       //
1195       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
1196       // but not with (4). If we did, the dependent access (3) would be within
1197       // the boundaries of the (2, 4) group.
1198       auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
1199                                  StrideEntry *A) -> Instruction * {
1200         for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
1201           Instruction *MemberOfGroupB = Group->getMember(Index);
1202           if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
1203                                     A, &*AccessStrideInfo.find(MemberOfGroupB)))
1204             return MemberOfGroupB;
1205         }
1206         return nullptr;
1207       };
1208 
1209       auto GroupA = getInterleaveGroup(A);
1210       // If A is a load, dependencies are tolerable, there's nothing to do here.
1211       // If both A and B belong to the same (store) group, they are independent,
1212       // even if dependencies have not been recorded.
1213       // If both GroupA and GroupB are null, there's nothing to do here.
1214       if (A->mayWriteToMemory() && GroupA != GroupB) {
1215         Instruction *DependentInst = nullptr;
1216         // If GroupB is a load group, we have to compare AI against all
1217         // members of GroupB because if any load within GroupB has a dependency
1218         // on AI, we need to mark GroupB as complete and also release the
1219         // store GroupA (if A belongs to one). The former prevents incorrect
1220         // hoisting of load B above store A while the latter prevents incorrect
1221         // sinking of store A below load B.
1222         if (GroupB && LoadGroups.contains(GroupB))
1223           DependentInst = DependentMember(GroupB, &*AI);
1224         else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
1225           DependentInst = B;
1226 
1227         if (DependentInst) {
1228           // A has a store dependence on B (or on some load within GroupB) and
1229           // is part of a store group. Release A's group to prevent illegal
1230           // sinking of A below B. A will then be free to form another group
1231           // with instructions that precede it.
1232           if (GroupA && StoreGroups.contains(GroupA)) {
1233             LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
1234                                  "dependence between "
1235                               << *A << " and " << *DependentInst << '\n');
1236             StoreGroups.remove(GroupA);
1237             releaseGroup(GroupA);
1238           }
1239           // If B is a load and part of an interleave group, no earlier loads
1240           // can be added to B's interleave group, because this would mean the
1241           // DependentInst would move across store A. Mark the interleave group
1242           // as complete.
1243           if (GroupB && LoadGroups.contains(GroupB)) {
1244             LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
1245                               << " as complete.\n");
1246             CompletedLoadGroups.insert(GroupB);
1247           }
1248         }
1249       }
1250       if (CompletedLoadGroups.contains(GroupB)) {
1251         // Skip trying to add A to B, continue to look for other conflicting A's
1252         // in groups to be released.
1253         continue;
1254       }
1255 
1256       // At this point, we've checked for illegal code motion. If either A or B
1257       // isn't strided, there's nothing left to do.
1258       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
1259         continue;
1260 
1261       // Ignore A if it's already in a group or isn't the same kind of memory
1262       // operation as B.
1263       // Note that mayReadFromMemory() isn't mutually exclusive to
1264       // mayWriteToMemory in the case of atomic loads. We shouldn't see those
1265       // here, canVectorizeMemory() should have returned false - except for the
1266       // case we asked for optimization remarks.
1267       if (isInterleaved(A) ||
1268           (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
1269           (A->mayWriteToMemory() != B->mayWriteToMemory()))
1270         continue;
1271 
1272       // Check rules 1 and 2. Ignore A if its stride or size is different from
1273       // that of B.
1274       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
1275         continue;
1276 
1277       // Ignore A if the memory object of A and B don't belong to the same
1278       // address space
1279       if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
1280         continue;
1281 
1282       // Calculate the distance from A to B.
1283       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
1284           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
1285       if (!DistToB)
1286         continue;
1287       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
1288 
1289       // Check rule 3. Ignore A if its distance to B is not a multiple of the
1290       // size.
1291       if (DistanceToB % static_cast<int64_t>(DesB.Size))
1292         continue;
1293 
1294       // All members of a predicated interleave-group must have the same predicate,
1295       // and currently must reside in the same BB.
1296       BasicBlock *BlockA = A->getParent();
1297       BasicBlock *BlockB = B->getParent();
1298       if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
1299           (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
1300         continue;
1301 
1302       // The index of A is the index of B plus A's distance to B in multiples
1303       // of the size.
1304       int IndexA =
1305           GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
1306 
1307       // Try to insert A into B's group.
1308       if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
1309         LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
1310                           << "    into the interleave group with" << *B
1311                           << '\n');
1312         InterleaveGroupMap[A] = GroupB;
1313 
1314         // Set the first load in program order as the insert position.
1315         if (A->mayReadFromMemory())
1316           GroupB->setInsertPos(A);
1317       }
1318     } // Iteration over A accesses.
1319   }   // Iteration over B accesses.
1320 
1321   auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
1322                                             int Index,
1323                                             std::string FirstOrLast) -> bool {
1324     Instruction *Member = Group->getMember(Index);
1325     assert(Member && "Group member does not exist");
1326     Value *MemberPtr = getLoadStorePointerOperand(Member);
1327     Type *AccessTy = getLoadStoreType(Member);
1328     if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
1329                      /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
1330       return false;
1331     LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
1332                       << FirstOrLast
1333                       << " group member potentially pointer-wrapping.\n");
1334     releaseGroup(Group);
1335     return true;
1336   };
1337 
1338   // Remove interleaved groups with gaps whose memory
1339   // accesses may wrap around. We have to revisit the getPtrStride analysis,
1340   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
1341   // not check wrapping (see documentation there).
1342   // FORNOW we use Assume=false;
1343   // TODO: Change to Assume=true but making sure we don't exceed the threshold
1344   // of runtime SCEV assumptions checks (thereby potentially failing to
1345   // vectorize altogether).
1346   // Additional optional optimizations:
1347   // TODO: If we are peeling the loop and we know that the first pointer doesn't
1348   // wrap then we can deduce that all pointers in the group don't wrap.
1349   // This means that we can forcefully peel the loop in order to only have to
1350   // check the first pointer for no-wrap. When we'll change to use Assume=true
1351   // we'll only need at most one runtime check per interleaved group.
1352   for (auto *Group : LoadGroups) {
1353     // Case 1: A full group. We can skip the checks; for full groups, if the wide
1354     // load would wrap around the address space we would do a memory access at
1355     // nullptr even without the transformation.
1356     if (Group->getNumMembers() == Group->getFactor())
1357       continue;
1358 
1359     // Case 2: If first and last members of the group don't wrap this implies
1360     // that all the pointers in the group don't wrap.
1361     // So we check only group member 0 (which is always guaranteed to exist),
1362     // and group member Factor - 1; if the latter doesn't exist we rely on
1363     // peeling (if it is a non-reversed access -- see Case 3).
1364     if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
1365       continue;
1366     if (Group->getMember(Group->getFactor() - 1))
1367       InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
1368                                      std::string("last"));
1369     else {
1370       // Case 3: A non-reversed interleaved load group with gaps: We need
1371       // to execute at least one scalar epilogue iteration. This will ensure
1372       // we don't speculatively access memory out-of-bounds. We only need
1373       // to look for a member at index factor - 1, since every group must have
1374       // a member at index zero.
1375       if (Group->isReverse()) {
1376         LLVM_DEBUG(
1377             dbgs() << "LV: Invalidate candidate interleaved group due to "
1378                       "a reverse access with gaps.\n");
1379         releaseGroup(Group);
1380         continue;
1381       }
1382       LLVM_DEBUG(
1383           dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
1384       RequiresScalarEpilogue = true;
1385     }
1386   }
1387 
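  // For illustration (assumed example): a store group of factor 2 with a
  // member only at index 0, e.g.
  //   for (i = 0; i < n; i++)
  //     A[2*i] = x;      // member at index 0; index 1 is a gap
  // can only be widened into a single wide store if the lanes of the gap are
  // masked out, so such groups are kept only when the target enables masked
  // interleaved accesses.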
1388   for (auto *Group : StoreGroups) {
1389     // Case 1: A full group. We can skip the checks; for full groups, if the wide
1390     // store would wrap around the address space we would do a memory access at
1391     // nullptr even without the transformation.
1392     if (Group->getNumMembers() == Group->getFactor())
1393       continue;
1394 
1395     // An interleaved store group with gaps is implemented using a masked wide
1396     // store. Remove interleaved store groups with gaps if masked interleaved
1397     // accesses are not enabled by the target.
1398     if (!EnablePredicatedInterleavedMemAccesses) {
1399       LLVM_DEBUG(
1400           dbgs() << "LV: Invalidate candidate interleaved store group due "
1401                     "to gaps.\n");
1402       releaseGroup(Group);
1403       continue;
1404     }
1405 
1406     // Case 2: If the first and last members of the group don't wrap, this
1407     // implies that all the pointers in the group don't wrap.
1408     // So we check only group member 0 (which is always guaranteed to exist)
1409     // and the last group member. Case 3 (scalar epilogue) is not relevant for
1410     // stores with gaps, which are implemented with a masked store (rather than
1411     // a speculative access, as in loads).
1412     if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
1413       continue;
1414     for (int Index = Group->getFactor() - 1; Index > 0; Index--)
1415       if (Group->getMember(Index)) {
1416         InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
1417         break;
1418       }
1419   }
1420 }
1421 
1422 void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
1423   // If no group had triggered the requirement to create an epilogue loop,
1424   // there is nothing to do.
1425   if (!requiresScalarEpilogue())
1426     return;
1427 
1428   bool ReleasedGroup = false;
1429   // Release groups requiring scalar epilogues. Note that this also removes them
1430   // from InterleaveGroups.
1431   for (auto *Group : make_early_inc_range(InterleaveGroups)) {
1432     if (!Group->requiresScalarEpilogue())
1433       continue;
1434     LLVM_DEBUG(
1435         dbgs()
1436         << "LV: Invalidate candidate interleaved group due to gaps that "
1437            "require a scalar epilogue (not allowed under optsize) and cannot "
1438            "be masked (not enabled). \n");
1439     releaseGroup(Group);
1440     ReleasedGroup = true;
1441   }
1442   assert(ReleasedGroup && "At least one group must be invalidated, as a "
1443                           "scalar epilogue was required");
1444   (void)ReleasedGroup;
1445   RequiresScalarEpilogue = false;
1446 }
1447 
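// The generic template below is never meant to be instantiated and called;
// only the specialization for Instruction is usable, and it merges metadata
// from all group members onto the newly created wide memory instruction via
// propagateMetadata.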
1448 template <typename InstT>
1449 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
1450   llvm_unreachable("addMetadata can only be used for Instruction");
1451 }
1452 
1453 namespace llvm {
1454 template <>
1455 void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
1456   SmallVector<Value *, 4> VL;
1457   std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
1458                  [](std::pair<int, Instruction *> p) { return p.second; });
1459   propagateMetadata(NewInst, VL);
1460 }
1461 }
1462 
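// For illustration (assumed example): a call site carrying the attribute
//   "vector-function-abi-variant"="_ZGVnN2v_foo(foo_vec),_ZGVnN4v_foo"
// lists two candidate mappings. A mapping is reported only if it can be
// demangled against the call's function type and the vector function it names
// is present in the module.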
1463 void VFABI::getVectorVariantNames(
1464     const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
1465   const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString();
1466   if (S.empty())
1467     return;
1468 
1469   SmallVector<StringRef, 8> ListAttr;
1470   S.split(ListAttr, ",");
1471 
1472   for (const auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
1473     std::optional<VFInfo> Info =
1474         VFABI::tryDemangleForVFABI(S, CI.getFunctionType());
1475     if (Info && CI.getModule()->getFunction(Info->VectorName)) {
1476       LLVM_DEBUG(dbgs() << "VFABI: Adding mapping '" << S << "' for " << CI
1477                         << "\n");
1478       VariantMappings.push_back(std::string(S));
1479     } else
1480       LLVM_DEBUG(dbgs() << "VFABI: Invalid mapping '" << S << "'\n");
1481   }
1482 }
1483 
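// For illustration (assumed example): with VF = 4 and a parameter shape of
// <Vector, OMP_Uniform, GlobalPredicate>, the scalar type
//   double (double, i64)
// becomes
//   <4 x double> (<4 x double>, i64, <4 x i1>)
// i.e. vector parameters and a non-void return type are widened by VF, the
// uniform parameter keeps its scalar type, and the global predicate adds a
// mask operand of <VF x i1> at its position in the parameter list.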
1484 FunctionType *VFABI::createFunctionType(const VFInfo &Info,
1485                                         const FunctionType *ScalarFTy) {
1486   // Create vector parameter types
1487   SmallVector<Type *, 8> VecTypes;
1488   ElementCount VF = Info.Shape.VF;
1489   int ScalarParamIndex = 0;
1490   for (auto VFParam : Info.Shape.Parameters) {
1491     if (VFParam.ParamKind == VFParamKind::GlobalPredicate) {
1492       VectorType *MaskTy =
1493           VectorType::get(Type::getInt1Ty(ScalarFTy->getContext()), VF);
1494       VecTypes.push_back(MaskTy);
1495       continue;
1496     }
1497 
1498     Type *OperandTy = ScalarFTy->getParamType(ScalarParamIndex++);
1499     if (VFParam.ParamKind == VFParamKind::Vector)
1500       OperandTy = VectorType::get(OperandTy, VF);
1501     VecTypes.push_back(OperandTy);
1502   }
1503 
1504   auto *RetTy = ScalarFTy->getReturnType();
1505   if (!RetTy->isVoidTy())
1506     RetTy = VectorType::get(RetTy, VF);
1507   return FunctionType::get(RetTy, VecTypes, false);
1508 }
1509 
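// For illustration (assumed example): the parameter list
//   {0: Vector, 1: OMP_LinearPos with LinearStepOrPos = 2, 2: OMP_Uniform}
// is valid because the runtime linear step of parameter 1 names another
// parameter (2) that is marked uniform. It would be rejected if parameter 2
// were not uniform, if the step pointed at parameter 1 itself, or if a
// compile-time linear step were zero.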
1510 bool VFShape::hasValidParameterList() const {
1511   for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
1512        ++Pos) {
1513     assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");
1514 
1515     switch (Parameters[Pos].ParamKind) {
1516     default: // Nothing to check.
1517       break;
1518     case VFParamKind::OMP_Linear:
1519     case VFParamKind::OMP_LinearRef:
1520     case VFParamKind::OMP_LinearVal:
1521     case VFParamKind::OMP_LinearUVal:
1522       // Compile-time linear steps must be non-zero.
1523       if (Parameters[Pos].LinearStepOrPos == 0)
1524         return false;
1525       break;
1526     case VFParamKind::OMP_LinearPos:
1527     case VFParamKind::OMP_LinearRefPos:
1528     case VFParamKind::OMP_LinearValPos:
1529     case VFParamKind::OMP_LinearUValPos:
1530       // The runtime linear step must refer to some other
1531       // parameter in the signature.
1532       if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
1533         return false;
1534       // The linear step parameter must be marked as uniform.
1535       if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
1536           VFParamKind::OMP_Uniform)
1537         return false;
1538       // The linear step parameter can't point at itself.
1539       if (Parameters[Pos].LinearStepOrPos == int(Pos))
1540         return false;
1541       break;
1542     case VFParamKind::GlobalPredicate:
1543       // The global predicate must be unique. It can be placed anywhere in the
1544       // signature.
1545       for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
1546         if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
1547           return false;
1548       break;
1549     }
1550   }
1551   return true;
1552 }
1553