xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1*700637cbSDimitry Andric //===-- RISCVInterleavedAccess.cpp - RISC-V Interleaved Access Transform --===//
2*700637cbSDimitry Andric //
3*700637cbSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*700637cbSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5*700637cbSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*700637cbSDimitry Andric //
7*700637cbSDimitry Andric //===----------------------------------------------------------------------===//
8*700637cbSDimitry Andric //
9*700637cbSDimitry Andric // Functions and callbacks related to the InterleavedAccessPass.
10*700637cbSDimitry Andric //
11*700637cbSDimitry Andric //===----------------------------------------------------------------------===//
12*700637cbSDimitry Andric 
13*700637cbSDimitry Andric #include "RISCV.h"
14*700637cbSDimitry Andric #include "RISCVISelLowering.h"
15*700637cbSDimitry Andric #include "RISCVSubtarget.h"
16*700637cbSDimitry Andric #include "llvm/Analysis/ValueTracking.h"
17*700637cbSDimitry Andric #include "llvm/CodeGen/ValueTypes.h"
18*700637cbSDimitry Andric #include "llvm/IR/IRBuilder.h"
19*700637cbSDimitry Andric #include "llvm/IR/Instructions.h"
20*700637cbSDimitry Andric #include "llvm/IR/IntrinsicsRISCV.h"
21*700637cbSDimitry Andric #include "llvm/IR/Module.h"
22*700637cbSDimitry Andric #include "llvm/IR/PatternMatch.h"
23*700637cbSDimitry Andric 
24*700637cbSDimitry Andric using namespace llvm;
25*700637cbSDimitry Andric 
isLegalInterleavedAccessType(VectorType * VTy,unsigned Factor,Align Alignment,unsigned AddrSpace,const DataLayout & DL) const26*700637cbSDimitry Andric bool RISCVTargetLowering::isLegalInterleavedAccessType(
27*700637cbSDimitry Andric     VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace,
28*700637cbSDimitry Andric     const DataLayout &DL) const {
29*700637cbSDimitry Andric   EVT VT = getValueType(DL, VTy);
30*700637cbSDimitry Andric   // Don't lower vlseg/vsseg for vector types that can't be split.
31*700637cbSDimitry Andric   if (!isTypeLegal(VT))
32*700637cbSDimitry Andric     return false;
33*700637cbSDimitry Andric 
34*700637cbSDimitry Andric   if (!isLegalElementTypeForRVV(VT.getScalarType()) ||
35*700637cbSDimitry Andric       !allowsMemoryAccessForAlignment(VTy->getContext(), DL, VT, AddrSpace,
36*700637cbSDimitry Andric                                       Alignment))
37*700637cbSDimitry Andric     return false;
38*700637cbSDimitry Andric 
39*700637cbSDimitry Andric   MVT ContainerVT = VT.getSimpleVT();
40*700637cbSDimitry Andric 
41*700637cbSDimitry Andric   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
42*700637cbSDimitry Andric     if (!Subtarget.useRVVForFixedLengthVectors())
43*700637cbSDimitry Andric       return false;
44*700637cbSDimitry Andric     // Sometimes the interleaved access pass picks up splats as interleaves of
45*700637cbSDimitry Andric     // one element. Don't lower these.
46*700637cbSDimitry Andric     if (FVTy->getNumElements() < 2)
47*700637cbSDimitry Andric       return false;
48*700637cbSDimitry Andric 
49*700637cbSDimitry Andric     ContainerVT = getContainerForFixedLengthVector(VT.getSimpleVT());
50*700637cbSDimitry Andric   }
51*700637cbSDimitry Andric 
52*700637cbSDimitry Andric   // Need to make sure that EMUL * NFIELDS ≤ 8
53*700637cbSDimitry Andric   auto [LMUL, Fractional] = RISCVVType::decodeVLMUL(getLMUL(ContainerVT));
54*700637cbSDimitry Andric   if (Fractional)
55*700637cbSDimitry Andric     return true;
56*700637cbSDimitry Andric   return Factor * LMUL <= 8;
57*700637cbSDimitry Andric }
58*700637cbSDimitry Andric 
59*700637cbSDimitry Andric static const Intrinsic::ID FixedVlsegIntrIds[] = {
60*700637cbSDimitry Andric     Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
61*700637cbSDimitry Andric     Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
62*700637cbSDimitry Andric     Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
63*700637cbSDimitry Andric     Intrinsic::riscv_seg8_load_mask};
64*700637cbSDimitry Andric 
65*700637cbSDimitry Andric static const Intrinsic::ID ScalableVlsegIntrIds[] = {
66*700637cbSDimitry Andric     Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
67*700637cbSDimitry Andric     Intrinsic::riscv_vlseg4_mask, Intrinsic::riscv_vlseg5_mask,
68*700637cbSDimitry Andric     Intrinsic::riscv_vlseg6_mask, Intrinsic::riscv_vlseg7_mask,
69*700637cbSDimitry Andric     Intrinsic::riscv_vlseg8_mask};
70*700637cbSDimitry Andric 
71*700637cbSDimitry Andric /// Lower an interleaved load into a vlsegN intrinsic.
72*700637cbSDimitry Andric ///
73*700637cbSDimitry Andric /// E.g. Lower an interleaved load (Factor = 2):
74*700637cbSDimitry Andric /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
75*700637cbSDimitry Andric /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
76*700637cbSDimitry Andric /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
77*700637cbSDimitry Andric ///
78*700637cbSDimitry Andric /// Into:
79*700637cbSDimitry Andric /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.riscv.seg2.load.v4i32.p0.i64(
80*700637cbSDimitry Andric ///                                        %ptr, i64 4)
81*700637cbSDimitry Andric /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
82*700637cbSDimitry Andric /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
lowerInterleavedLoad(LoadInst * LI,ArrayRef<ShuffleVectorInst * > Shuffles,ArrayRef<unsigned> Indices,unsigned Factor) const83*700637cbSDimitry Andric bool RISCVTargetLowering::lowerInterleavedLoad(
84*700637cbSDimitry Andric     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
85*700637cbSDimitry Andric     ArrayRef<unsigned> Indices, unsigned Factor) const {
86*700637cbSDimitry Andric   assert(Indices.size() == Shuffles.size());
87*700637cbSDimitry Andric 
88*700637cbSDimitry Andric   IRBuilder<> Builder(LI);
89*700637cbSDimitry Andric 
90*700637cbSDimitry Andric   const DataLayout &DL = LI->getDataLayout();
91*700637cbSDimitry Andric 
92*700637cbSDimitry Andric   auto *VTy = cast<FixedVectorType>(Shuffles[0]->getType());
93*700637cbSDimitry Andric   if (!isLegalInterleavedAccessType(VTy, Factor, LI->getAlign(),
94*700637cbSDimitry Andric                                     LI->getPointerAddressSpace(), DL))
95*700637cbSDimitry Andric     return false;
96*700637cbSDimitry Andric 
97*700637cbSDimitry Andric   auto *PtrTy = LI->getPointerOperandType();
98*700637cbSDimitry Andric   auto *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
99*700637cbSDimitry Andric 
100*700637cbSDimitry Andric   // If the segment load is going to be performed segment at a time anyways
101*700637cbSDimitry Andric   // and there's only one element used, use a strided load instead.  This
102*700637cbSDimitry Andric   // will be equally fast, and create less vector register pressure.
103*700637cbSDimitry Andric   if (Indices.size() == 1 && !Subtarget.hasOptimizedSegmentLoadStore(Factor)) {
104*700637cbSDimitry Andric     unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
105*700637cbSDimitry Andric     Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
106*700637cbSDimitry Andric     Value *Offset = ConstantInt::get(XLenTy, Indices[0] * ScalarSizeInBytes);
107*700637cbSDimitry Andric     Value *BasePtr = Builder.CreatePtrAdd(LI->getPointerOperand(), Offset);
108*700637cbSDimitry Andric     Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
109*700637cbSDimitry Andric     Value *VL = Builder.getInt32(VTy->getNumElements());
110*700637cbSDimitry Andric 
111*700637cbSDimitry Andric     CallInst *CI =
112*700637cbSDimitry Andric         Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_load,
113*700637cbSDimitry Andric                                 {VTy, BasePtr->getType(), Stride->getType()},
114*700637cbSDimitry Andric                                 {BasePtr, Stride, Mask, VL});
115*700637cbSDimitry Andric     CI->addParamAttr(
116*700637cbSDimitry Andric         0, Attribute::getWithAlignment(CI->getContext(), LI->getAlign()));
117*700637cbSDimitry Andric     Shuffles[0]->replaceAllUsesWith(CI);
118*700637cbSDimitry Andric     return true;
119*700637cbSDimitry Andric   };
120*700637cbSDimitry Andric 
121*700637cbSDimitry Andric   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
122*700637cbSDimitry Andric   Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
123*700637cbSDimitry Andric   CallInst *VlsegN = Builder.CreateIntrinsic(
124*700637cbSDimitry Andric       FixedVlsegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy},
125*700637cbSDimitry Andric       {LI->getPointerOperand(), Mask, VL});
126*700637cbSDimitry Andric 
127*700637cbSDimitry Andric   for (unsigned i = 0; i < Shuffles.size(); i++) {
128*700637cbSDimitry Andric     Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
129*700637cbSDimitry Andric     Shuffles[i]->replaceAllUsesWith(SubVec);
130*700637cbSDimitry Andric   }
131*700637cbSDimitry Andric 
132*700637cbSDimitry Andric   return true;
133*700637cbSDimitry Andric }
134*700637cbSDimitry Andric 
135*700637cbSDimitry Andric static const Intrinsic::ID FixedVssegIntrIds[] = {
136*700637cbSDimitry Andric     Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
137*700637cbSDimitry Andric     Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
138*700637cbSDimitry Andric     Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
139*700637cbSDimitry Andric     Intrinsic::riscv_seg8_store_mask};
140*700637cbSDimitry Andric 
141*700637cbSDimitry Andric static const Intrinsic::ID ScalableVssegIntrIds[] = {
142*700637cbSDimitry Andric     Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
143*700637cbSDimitry Andric     Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
144*700637cbSDimitry Andric     Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
145*700637cbSDimitry Andric     Intrinsic::riscv_vsseg8_mask};
146*700637cbSDimitry Andric 
147*700637cbSDimitry Andric /// Lower an interleaved store into a vssegN intrinsic.
148*700637cbSDimitry Andric ///
149*700637cbSDimitry Andric /// E.g. Lower an interleaved store (Factor = 3):
150*700637cbSDimitry Andric /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
151*700637cbSDimitry Andric ///                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
152*700637cbSDimitry Andric /// store <12 x i32> %i.vec, <12 x i32>* %ptr
153*700637cbSDimitry Andric ///
154*700637cbSDimitry Andric /// Into:
155*700637cbSDimitry Andric /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
156*700637cbSDimitry Andric /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
157*700637cbSDimitry Andric /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
158*700637cbSDimitry Andric /// call void llvm.riscv.seg3.store.v4i32.p0.i64(%sub.v0, %sub.v1, %sub.v2,
159*700637cbSDimitry Andric ///                                              %ptr, i32 4)
160*700637cbSDimitry Andric ///
161*700637cbSDimitry Andric /// Note that the new shufflevectors will be removed and we'll only generate one
162*700637cbSDimitry Andric /// vsseg3 instruction in CodeGen.
lowerInterleavedStore(StoreInst * SI,ShuffleVectorInst * SVI,unsigned Factor) const163*700637cbSDimitry Andric bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
164*700637cbSDimitry Andric                                                 ShuffleVectorInst *SVI,
165*700637cbSDimitry Andric                                                 unsigned Factor) const {
166*700637cbSDimitry Andric   IRBuilder<> Builder(SI);
167*700637cbSDimitry Andric   const DataLayout &DL = SI->getDataLayout();
168*700637cbSDimitry Andric   auto Mask = SVI->getShuffleMask();
169*700637cbSDimitry Andric   auto *ShuffleVTy = cast<FixedVectorType>(SVI->getType());
170*700637cbSDimitry Andric   // Given SVI : <n*factor x ty>, then VTy : <n x ty>
171*700637cbSDimitry Andric   auto *VTy = FixedVectorType::get(ShuffleVTy->getElementType(),
172*700637cbSDimitry Andric                                    ShuffleVTy->getNumElements() / Factor);
173*700637cbSDimitry Andric   if (!isLegalInterleavedAccessType(VTy, Factor, SI->getAlign(),
174*700637cbSDimitry Andric                                     SI->getPointerAddressSpace(), DL))
175*700637cbSDimitry Andric     return false;
176*700637cbSDimitry Andric 
177*700637cbSDimitry Andric   auto *PtrTy = SI->getPointerOperandType();
178*700637cbSDimitry Andric   auto *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
179*700637cbSDimitry Andric 
180*700637cbSDimitry Andric   unsigned Index;
181*700637cbSDimitry Andric   // If the segment store only has one active lane (i.e. the interleave is
182*700637cbSDimitry Andric   // just a spread shuffle), we can use a strided store instead.  This will
183*700637cbSDimitry Andric   // be equally fast, and create less vector register pressure.
184*700637cbSDimitry Andric   if (!Subtarget.hasOptimizedSegmentLoadStore(Factor) &&
185*700637cbSDimitry Andric       isSpreadMask(Mask, Factor, Index)) {
186*700637cbSDimitry Andric     unsigned ScalarSizeInBytes =
187*700637cbSDimitry Andric         DL.getTypeStoreSize(ShuffleVTy->getElementType());
188*700637cbSDimitry Andric     Value *Data = SVI->getOperand(0);
189*700637cbSDimitry Andric     auto *DataVTy = cast<FixedVectorType>(Data->getType());
190*700637cbSDimitry Andric     Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
191*700637cbSDimitry Andric     Value *Offset = ConstantInt::get(XLenTy, Index * ScalarSizeInBytes);
192*700637cbSDimitry Andric     Value *BasePtr = Builder.CreatePtrAdd(SI->getPointerOperand(), Offset);
193*700637cbSDimitry Andric     Value *Mask = Builder.getAllOnesMask(DataVTy->getElementCount());
194*700637cbSDimitry Andric     Value *VL = Builder.getInt32(VTy->getNumElements());
195*700637cbSDimitry Andric 
196*700637cbSDimitry Andric     CallInst *CI = Builder.CreateIntrinsic(
197*700637cbSDimitry Andric         Intrinsic::experimental_vp_strided_store,
198*700637cbSDimitry Andric         {Data->getType(), BasePtr->getType(), Stride->getType()},
199*700637cbSDimitry Andric         {Data, BasePtr, Stride, Mask, VL});
200*700637cbSDimitry Andric     CI->addParamAttr(
201*700637cbSDimitry Andric         1, Attribute::getWithAlignment(CI->getContext(), SI->getAlign()));
202*700637cbSDimitry Andric 
203*700637cbSDimitry Andric     return true;
204*700637cbSDimitry Andric   }
205*700637cbSDimitry Andric 
206*700637cbSDimitry Andric   Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
207*700637cbSDimitry Andric       SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy});
208*700637cbSDimitry Andric 
209*700637cbSDimitry Andric   SmallVector<Value *, 10> Ops;
210*700637cbSDimitry Andric   SmallVector<int, 16> NewShuffleMask;
211*700637cbSDimitry Andric 
212*700637cbSDimitry Andric   for (unsigned i = 0; i < Factor; i++) {
213*700637cbSDimitry Andric     // Collect shuffle mask for this lane.
214*700637cbSDimitry Andric     for (unsigned j = 0; j < VTy->getNumElements(); j++)
215*700637cbSDimitry Andric       NewShuffleMask.push_back(Mask[i + Factor * j]);
216*700637cbSDimitry Andric 
217*700637cbSDimitry Andric     Value *Shuffle = Builder.CreateShuffleVector(
218*700637cbSDimitry Andric         SVI->getOperand(0), SVI->getOperand(1), NewShuffleMask);
219*700637cbSDimitry Andric     Ops.push_back(Shuffle);
220*700637cbSDimitry Andric 
221*700637cbSDimitry Andric     NewShuffleMask.clear();
222*700637cbSDimitry Andric   }
223*700637cbSDimitry Andric   // This VL should be OK (should be executable in one vsseg instruction,
224*700637cbSDimitry Andric   // potentially under larger LMULs) because we checked that the fixed vector
225*700637cbSDimitry Andric   // type fits in isLegalInterleavedAccessType
226*700637cbSDimitry Andric   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
227*700637cbSDimitry Andric   Value *StoreMask = Builder.getAllOnesMask(VTy->getElementCount());
228*700637cbSDimitry Andric   Ops.append({SI->getPointerOperand(), StoreMask, VL});
229*700637cbSDimitry Andric 
230*700637cbSDimitry Andric   Builder.CreateCall(VssegNFunc, Ops);
231*700637cbSDimitry Andric 
232*700637cbSDimitry Andric   return true;
233*700637cbSDimitry Andric }
234*700637cbSDimitry Andric 
lowerDeinterleaveIntrinsicToLoad(LoadInst * LI,ArrayRef<Value * > DeinterleaveValues) const235*700637cbSDimitry Andric bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
236*700637cbSDimitry Andric     LoadInst *LI, ArrayRef<Value *> DeinterleaveValues) const {
237*700637cbSDimitry Andric   const unsigned Factor = DeinterleaveValues.size();
238*700637cbSDimitry Andric   if (Factor > 8)
239*700637cbSDimitry Andric     return false;
240*700637cbSDimitry Andric 
241*700637cbSDimitry Andric   assert(LI->isSimple());
242*700637cbSDimitry Andric   IRBuilder<> Builder(LI);
243*700637cbSDimitry Andric 
244*700637cbSDimitry Andric   Value *FirstActive =
245*700637cbSDimitry Andric       *llvm::find_if(DeinterleaveValues, [](Value *V) { return V != nullptr; });
246*700637cbSDimitry Andric   VectorType *ResVTy = cast<VectorType>(FirstActive->getType());
247*700637cbSDimitry Andric 
248*700637cbSDimitry Andric   const DataLayout &DL = LI->getDataLayout();
249*700637cbSDimitry Andric 
250*700637cbSDimitry Andric   if (!isLegalInterleavedAccessType(ResVTy, Factor, LI->getAlign(),
251*700637cbSDimitry Andric                                     LI->getPointerAddressSpace(), DL))
252*700637cbSDimitry Andric     return false;
253*700637cbSDimitry Andric 
254*700637cbSDimitry Andric   Value *Return;
255*700637cbSDimitry Andric   Type *PtrTy = LI->getPointerOperandType();
256*700637cbSDimitry Andric   Type *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
257*700637cbSDimitry Andric 
258*700637cbSDimitry Andric   if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
259*700637cbSDimitry Andric     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
260*700637cbSDimitry Andric     Value *Mask = Builder.getAllOnesMask(FVTy->getElementCount());
261*700637cbSDimitry Andric     Return = Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
262*700637cbSDimitry Andric                                      {ResVTy, PtrTy, XLenTy},
263*700637cbSDimitry Andric                                      {LI->getPointerOperand(), Mask, VL});
264*700637cbSDimitry Andric   } else {
265*700637cbSDimitry Andric     static const Intrinsic::ID IntrIds[] = {
266*700637cbSDimitry Andric         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
267*700637cbSDimitry Andric         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
268*700637cbSDimitry Andric         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
269*700637cbSDimitry Andric         Intrinsic::riscv_vlseg8};
270*700637cbSDimitry Andric 
271*700637cbSDimitry Andric     unsigned SEW = DL.getTypeSizeInBits(ResVTy->getElementType());
272*700637cbSDimitry Andric     unsigned NumElts = ResVTy->getElementCount().getKnownMinValue();
273*700637cbSDimitry Andric     Type *VecTupTy = TargetExtType::get(
274*700637cbSDimitry Andric         LI->getContext(), "riscv.vector.tuple",
275*700637cbSDimitry Andric         ScalableVectorType::get(Type::getInt8Ty(LI->getContext()),
276*700637cbSDimitry Andric                                 NumElts * SEW / 8),
277*700637cbSDimitry Andric         Factor);
278*700637cbSDimitry Andric 
279*700637cbSDimitry Andric     Value *VL = Constant::getAllOnesValue(XLenTy);
280*700637cbSDimitry Andric 
281*700637cbSDimitry Andric     Value *Vlseg = Builder.CreateIntrinsic(
282*700637cbSDimitry Andric         IntrIds[Factor - 2], {VecTupTy, PtrTy, XLenTy},
283*700637cbSDimitry Andric         {PoisonValue::get(VecTupTy), LI->getPointerOperand(), VL,
284*700637cbSDimitry Andric          ConstantInt::get(XLenTy, Log2_64(SEW))});
285*700637cbSDimitry Andric 
286*700637cbSDimitry Andric     SmallVector<Type *, 2> AggrTypes{Factor, ResVTy};
287*700637cbSDimitry Andric     Return = PoisonValue::get(StructType::get(LI->getContext(), AggrTypes));
288*700637cbSDimitry Andric     for (unsigned i = 0; i < Factor; ++i) {
289*700637cbSDimitry Andric       Value *VecExtract = Builder.CreateIntrinsic(
290*700637cbSDimitry Andric           Intrinsic::riscv_tuple_extract, {ResVTy, VecTupTy},
291*700637cbSDimitry Andric           {Vlseg, Builder.getInt32(i)});
292*700637cbSDimitry Andric       Return = Builder.CreateInsertValue(Return, VecExtract, i);
293*700637cbSDimitry Andric     }
294*700637cbSDimitry Andric   }
295*700637cbSDimitry Andric 
296*700637cbSDimitry Andric   for (auto [Idx, DIV] : enumerate(DeinterleaveValues)) {
297*700637cbSDimitry Andric     if (!DIV)
298*700637cbSDimitry Andric       continue;
299*700637cbSDimitry Andric     // We have to create a brand new ExtractValue to replace each
300*700637cbSDimitry Andric     // of these old ExtractValue instructions.
301*700637cbSDimitry Andric     Value *NewEV =
302*700637cbSDimitry Andric         Builder.CreateExtractValue(Return, {static_cast<unsigned>(Idx)});
303*700637cbSDimitry Andric     DIV->replaceAllUsesWith(NewEV);
304*700637cbSDimitry Andric   }
305*700637cbSDimitry Andric 
306*700637cbSDimitry Andric   return true;
307*700637cbSDimitry Andric }
308*700637cbSDimitry Andric 
lowerInterleaveIntrinsicToStore(StoreInst * SI,ArrayRef<Value * > InterleaveValues) const309*700637cbSDimitry Andric bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
310*700637cbSDimitry Andric     StoreInst *SI, ArrayRef<Value *> InterleaveValues) const {
311*700637cbSDimitry Andric   unsigned Factor = InterleaveValues.size();
312*700637cbSDimitry Andric   if (Factor > 8)
313*700637cbSDimitry Andric     return false;
314*700637cbSDimitry Andric 
315*700637cbSDimitry Andric   assert(SI->isSimple());
316*700637cbSDimitry Andric   IRBuilder<> Builder(SI);
317*700637cbSDimitry Andric 
318*700637cbSDimitry Andric   auto *InVTy = cast<VectorType>(InterleaveValues[0]->getType());
319*700637cbSDimitry Andric   auto *PtrTy = SI->getPointerOperandType();
320*700637cbSDimitry Andric   const DataLayout &DL = SI->getDataLayout();
321*700637cbSDimitry Andric 
322*700637cbSDimitry Andric   if (!isLegalInterleavedAccessType(InVTy, Factor, SI->getAlign(),
323*700637cbSDimitry Andric                                     SI->getPointerAddressSpace(), DL))
324*700637cbSDimitry Andric     return false;
325*700637cbSDimitry Andric 
326*700637cbSDimitry Andric   Type *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
327*700637cbSDimitry Andric 
328*700637cbSDimitry Andric   if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
329*700637cbSDimitry Andric     Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
330*700637cbSDimitry Andric         SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, PtrTy, XLenTy});
331*700637cbSDimitry Andric 
332*700637cbSDimitry Andric     SmallVector<Value *, 10> Ops(InterleaveValues);
333*700637cbSDimitry Andric     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
334*700637cbSDimitry Andric     Value *Mask = Builder.getAllOnesMask(FVTy->getElementCount());
335*700637cbSDimitry Andric     Ops.append({SI->getPointerOperand(), Mask, VL});
336*700637cbSDimitry Andric 
337*700637cbSDimitry Andric     Builder.CreateCall(VssegNFunc, Ops);
338*700637cbSDimitry Andric   } else {
339*700637cbSDimitry Andric     static const Intrinsic::ID IntrIds[] = {
340*700637cbSDimitry Andric         Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
341*700637cbSDimitry Andric         Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
342*700637cbSDimitry Andric         Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
343*700637cbSDimitry Andric         Intrinsic::riscv_vsseg8};
344*700637cbSDimitry Andric 
345*700637cbSDimitry Andric     unsigned SEW = DL.getTypeSizeInBits(InVTy->getElementType());
346*700637cbSDimitry Andric     unsigned NumElts = InVTy->getElementCount().getKnownMinValue();
347*700637cbSDimitry Andric     Type *VecTupTy = TargetExtType::get(
348*700637cbSDimitry Andric         SI->getContext(), "riscv.vector.tuple",
349*700637cbSDimitry Andric         ScalableVectorType::get(Type::getInt8Ty(SI->getContext()),
350*700637cbSDimitry Andric                                 NumElts * SEW / 8),
351*700637cbSDimitry Andric         Factor);
352*700637cbSDimitry Andric 
353*700637cbSDimitry Andric     Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
354*700637cbSDimitry Andric         SI->getModule(), IntrIds[Factor - 2], {VecTupTy, PtrTy, XLenTy});
355*700637cbSDimitry Andric 
356*700637cbSDimitry Andric     Value *VL = Constant::getAllOnesValue(XLenTy);
357*700637cbSDimitry Andric 
358*700637cbSDimitry Andric     Value *StoredVal = PoisonValue::get(VecTupTy);
359*700637cbSDimitry Andric     for (unsigned i = 0; i < Factor; ++i)
360*700637cbSDimitry Andric       StoredVal = Builder.CreateIntrinsic(
361*700637cbSDimitry Andric           Intrinsic::riscv_tuple_insert, {VecTupTy, InVTy},
362*700637cbSDimitry Andric           {StoredVal, InterleaveValues[i], Builder.getInt32(i)});
363*700637cbSDimitry Andric 
364*700637cbSDimitry Andric     Builder.CreateCall(VssegNFunc, {StoredVal, SI->getPointerOperand(), VL,
365*700637cbSDimitry Andric                                     ConstantInt::get(XLenTy, Log2_64(SEW))});
366*700637cbSDimitry Andric   }
367*700637cbSDimitry Andric 
368*700637cbSDimitry Andric   return true;
369*700637cbSDimitry Andric }
370*700637cbSDimitry Andric 
isMultipleOfN(const Value * V,const DataLayout & DL,unsigned N)371*700637cbSDimitry Andric static bool isMultipleOfN(const Value *V, const DataLayout &DL, unsigned N) {
372*700637cbSDimitry Andric   assert(N);
373*700637cbSDimitry Andric   if (N == 1)
374*700637cbSDimitry Andric     return true;
375*700637cbSDimitry Andric 
376*700637cbSDimitry Andric   using namespace PatternMatch;
377*700637cbSDimitry Andric   // Right now we're only recognizing the simplest pattern.
378*700637cbSDimitry Andric   uint64_t C;
379*700637cbSDimitry Andric   if (match(V, m_CombineOr(m_ConstantInt(C),
380*700637cbSDimitry Andric                            m_c_Mul(m_Value(), m_ConstantInt(C)))) &&
381*700637cbSDimitry Andric       C && C % N == 0)
382*700637cbSDimitry Andric     return true;
383*700637cbSDimitry Andric 
384*700637cbSDimitry Andric   if (isPowerOf2_32(N)) {
385*700637cbSDimitry Andric     KnownBits KB = llvm::computeKnownBits(V, DL);
386*700637cbSDimitry Andric     return KB.countMinTrailingZeros() >= Log2_32(N);
387*700637cbSDimitry Andric   }
388*700637cbSDimitry Andric 
389*700637cbSDimitry Andric   return false;
390*700637cbSDimitry Andric }
391*700637cbSDimitry Andric 
392*700637cbSDimitry Andric /// Lower an interleaved vp.load into a vlsegN intrinsic.
393*700637cbSDimitry Andric ///
394*700637cbSDimitry Andric /// E.g. Lower an interleaved vp.load (Factor = 2):
395*700637cbSDimitry Andric ///   %l = call <vscale x 64 x i8> @llvm.vp.load.nxv64i8.p0(ptr %ptr,
396*700637cbSDimitry Andric ///                                                         %mask,
397*700637cbSDimitry Andric ///                                                         i32 %wide.rvl)
398*700637cbSDimitry Andric ///   %dl = tail call { <vscale x 32 x i8>, <vscale x 32 x i8> }
399*700637cbSDimitry Andric ///             @llvm.vector.deinterleave2.nxv64i8(
400*700637cbSDimitry Andric ///               <vscale x 64 x i8> %l)
401*700637cbSDimitry Andric ///   %r0 = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } %dl, 0
402*700637cbSDimitry Andric ///   %r1 = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } %dl, 1
403*700637cbSDimitry Andric ///
404*700637cbSDimitry Andric /// Into:
405*700637cbSDimitry Andric ///   %rvl = udiv %wide.rvl, 2
406*700637cbSDimitry Andric ///   %sl = call { <vscale x 32 x i8>, <vscale x 32 x i8> }
407*700637cbSDimitry Andric ///             @llvm.riscv.vlseg2.mask.nxv32i8.i64(<vscale x 32 x i8> undef,
408*700637cbSDimitry Andric ///                                                 <vscale x 32 x i8> undef,
409*700637cbSDimitry Andric ///                                                 ptr %ptr,
410*700637cbSDimitry Andric ///                                                 %mask,
411*700637cbSDimitry Andric ///                                                 i64 %rvl,
412*700637cbSDimitry Andric ///                                                 i64 1)
413*700637cbSDimitry Andric ///   %r0 = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } %sl, 0
414*700637cbSDimitry Andric ///   %r1 = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } %sl, 1
415*700637cbSDimitry Andric ///
416*700637cbSDimitry Andric /// NOTE: the deinterleave2 intrinsic won't be touched and is expected to be
417*700637cbSDimitry Andric /// removed by the caller
418*700637cbSDimitry Andric /// TODO: We probably can loosen the dependency on matching extractvalue when
419*700637cbSDimitry Andric /// dealing with factor of 2 (extractvalue is still required for most of other
420*700637cbSDimitry Andric /// factors though).
lowerInterleavedVPLoad(VPIntrinsic * Load,Value * Mask,ArrayRef<Value * > DeinterleaveResults) const421*700637cbSDimitry Andric bool RISCVTargetLowering::lowerInterleavedVPLoad(
422*700637cbSDimitry Andric     VPIntrinsic *Load, Value *Mask,
423*700637cbSDimitry Andric     ArrayRef<Value *> DeinterleaveResults) const {
424*700637cbSDimitry Andric   const unsigned Factor = DeinterleaveResults.size();
425*700637cbSDimitry Andric   assert(Mask && "Expect a valid mask");
426*700637cbSDimitry Andric   assert(Load->getIntrinsicID() == Intrinsic::vp_load &&
427*700637cbSDimitry Andric          "Unexpected intrinsic");
428*700637cbSDimitry Andric 
429*700637cbSDimitry Andric   Value *FirstActive = *llvm::find_if(DeinterleaveResults,
430*700637cbSDimitry Andric                                       [](Value *V) { return V != nullptr; });
431*700637cbSDimitry Andric   VectorType *VTy = cast<VectorType>(FirstActive->getType());
432*700637cbSDimitry Andric 
433*700637cbSDimitry Andric   auto &DL = Load->getModule()->getDataLayout();
434*700637cbSDimitry Andric   Align Alignment = Load->getParamAlign(0).value_or(
435*700637cbSDimitry Andric       DL.getABITypeAlign(VTy->getElementType()));
436*700637cbSDimitry Andric   if (!isLegalInterleavedAccessType(
437*700637cbSDimitry Andric           VTy, Factor, Alignment,
438*700637cbSDimitry Andric           Load->getArgOperand(0)->getType()->getPointerAddressSpace(), DL))
439*700637cbSDimitry Andric     return false;
440*700637cbSDimitry Andric 
441*700637cbSDimitry Andric   IRBuilder<> Builder(Load);
442*700637cbSDimitry Andric 
443*700637cbSDimitry Andric   Value *WideEVL = Load->getVectorLengthParam();
444*700637cbSDimitry Andric   // Conservatively check if EVL is a multiple of factor, otherwise some
445*700637cbSDimitry Andric   // (trailing) elements might be lost after the transformation.
446*700637cbSDimitry Andric   if (!isMultipleOfN(WideEVL, Load->getDataLayout(), Factor))
447*700637cbSDimitry Andric     return false;
448*700637cbSDimitry Andric 
449*700637cbSDimitry Andric   auto *PtrTy = Load->getArgOperand(0)->getType();
450*700637cbSDimitry Andric   auto *XLenTy = Type::getIntNTy(Load->getContext(), Subtarget.getXLen());
451*700637cbSDimitry Andric   Value *EVL = Builder.CreateZExt(
452*700637cbSDimitry Andric       Builder.CreateUDiv(WideEVL, ConstantInt::get(WideEVL->getType(), Factor)),
453*700637cbSDimitry Andric       XLenTy);
454*700637cbSDimitry Andric 
455*700637cbSDimitry Andric   Value *Return = nullptr;
456*700637cbSDimitry Andric   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
457*700637cbSDimitry Andric     Return = Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
458*700637cbSDimitry Andric                                      {FVTy, PtrTy, XLenTy},
459*700637cbSDimitry Andric                                      {Load->getArgOperand(0), Mask, EVL});
460*700637cbSDimitry Andric   } else {
461*700637cbSDimitry Andric     unsigned SEW = DL.getTypeSizeInBits(VTy->getElementType());
462*700637cbSDimitry Andric     unsigned NumElts = VTy->getElementCount().getKnownMinValue();
463*700637cbSDimitry Andric     Type *VecTupTy = TargetExtType::get(
464*700637cbSDimitry Andric         Load->getContext(), "riscv.vector.tuple",
465*700637cbSDimitry Andric         ScalableVectorType::get(Type::getInt8Ty(Load->getContext()),
466*700637cbSDimitry Andric                                 NumElts * SEW / 8),
467*700637cbSDimitry Andric         Factor);
468*700637cbSDimitry Andric 
469*700637cbSDimitry Andric     Value *PoisonVal = PoisonValue::get(VecTupTy);
470*700637cbSDimitry Andric 
471*700637cbSDimitry Andric     Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
472*700637cbSDimitry Andric         Load->getModule(), ScalableVlsegIntrIds[Factor - 2],
473*700637cbSDimitry Andric         {VecTupTy, PtrTy, Mask->getType(), EVL->getType()});
474*700637cbSDimitry Andric 
475*700637cbSDimitry Andric     Value *Operands[] = {
476*700637cbSDimitry Andric         PoisonVal,
477*700637cbSDimitry Andric         Load->getArgOperand(0),
478*700637cbSDimitry Andric         Mask,
479*700637cbSDimitry Andric         EVL,
480*700637cbSDimitry Andric         ConstantInt::get(XLenTy,
481*700637cbSDimitry Andric                          RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC),
482*700637cbSDimitry Andric         ConstantInt::get(XLenTy, Log2_64(SEW))};
483*700637cbSDimitry Andric 
484*700637cbSDimitry Andric     CallInst *VlsegN = Builder.CreateCall(VlsegNFunc, Operands);
485*700637cbSDimitry Andric 
486*700637cbSDimitry Andric     SmallVector<Type *, 8> AggrTypes{Factor, VTy};
487*700637cbSDimitry Andric     Return = PoisonValue::get(StructType::get(Load->getContext(), AggrTypes));
488*700637cbSDimitry Andric     Function *VecExtractFunc = Intrinsic::getOrInsertDeclaration(
489*700637cbSDimitry Andric         Load->getModule(), Intrinsic::riscv_tuple_extract, {VTy, VecTupTy});
490*700637cbSDimitry Andric     for (unsigned i = 0; i < Factor; ++i) {
491*700637cbSDimitry Andric       Value *VecExtract =
492*700637cbSDimitry Andric           Builder.CreateCall(VecExtractFunc, {VlsegN, Builder.getInt32(i)});
493*700637cbSDimitry Andric       Return = Builder.CreateInsertValue(Return, VecExtract, i);
494*700637cbSDimitry Andric     }
495*700637cbSDimitry Andric   }
496*700637cbSDimitry Andric 
497*700637cbSDimitry Andric   for (auto [Idx, DIO] : enumerate(DeinterleaveResults)) {
498*700637cbSDimitry Andric     if (!DIO)
499*700637cbSDimitry Andric       continue;
500*700637cbSDimitry Andric     // We have to create a brand new ExtractValue to replace each
501*700637cbSDimitry Andric     // of these old ExtractValue instructions.
502*700637cbSDimitry Andric     Value *NewEV =
503*700637cbSDimitry Andric         Builder.CreateExtractValue(Return, {static_cast<unsigned>(Idx)});
504*700637cbSDimitry Andric     DIO->replaceAllUsesWith(NewEV);
505*700637cbSDimitry Andric   }
506*700637cbSDimitry Andric 
507*700637cbSDimitry Andric   return true;
508*700637cbSDimitry Andric }
509*700637cbSDimitry Andric 
510*700637cbSDimitry Andric /// Lower an interleaved vp.store into a vssegN intrinsic.
511*700637cbSDimitry Andric ///
512*700637cbSDimitry Andric /// E.g. Lower an interleaved vp.store (Factor = 2):
513*700637cbSDimitry Andric ///
///   %is = tail call <vscale x 64 x i8>
///             @llvm.vector.interleave2.nxv64i8(
///                               <vscale x 32 x i8> %load0,
///                               <vscale x 32 x i8> %load1)
///   %wide.rvl = shl nuw nsw i32 %rvl, 1
///   tail call void @llvm.vp.store.nxv64i8.p0(
///                               <vscale x 64 x i8> %is, ptr %ptr,
///                               %mask,
///                               i32 %wide.rvl)
///
/// Into:
///   call void @llvm.riscv.vsseg2.mask.nxv32i8.i64(
///                               <vscale x 32 x i8> %load0,
///                               <vscale x 32 x i8> %load1, ptr %ptr,
///                               %mask,
///                               i64 %rvl)
lowerInterleavedVPStore(VPIntrinsic * Store,Value * Mask,ArrayRef<Value * > InterleaveOperands) const530*700637cbSDimitry Andric bool RISCVTargetLowering::lowerInterleavedVPStore(
531*700637cbSDimitry Andric     VPIntrinsic *Store, Value *Mask,
532*700637cbSDimitry Andric     ArrayRef<Value *> InterleaveOperands) const {
533*700637cbSDimitry Andric   assert(Mask && "Expect a valid mask");
534*700637cbSDimitry Andric   assert(Store->getIntrinsicID() == Intrinsic::vp_store &&
535*700637cbSDimitry Andric          "Unexpected intrinsic");
536*700637cbSDimitry Andric 
537*700637cbSDimitry Andric   const unsigned Factor = InterleaveOperands.size();
538*700637cbSDimitry Andric 
539*700637cbSDimitry Andric   auto *VTy = dyn_cast<VectorType>(InterleaveOperands[0]->getType());
540*700637cbSDimitry Andric   if (!VTy)
541*700637cbSDimitry Andric     return false;
542*700637cbSDimitry Andric 
543*700637cbSDimitry Andric   const DataLayout &DL = Store->getDataLayout();
544*700637cbSDimitry Andric   Align Alignment = Store->getParamAlign(1).value_or(
545*700637cbSDimitry Andric       DL.getABITypeAlign(VTy->getElementType()));
546*700637cbSDimitry Andric   if (!isLegalInterleavedAccessType(
547*700637cbSDimitry Andric           VTy, Factor, Alignment,
548*700637cbSDimitry Andric           Store->getArgOperand(1)->getType()->getPointerAddressSpace(), DL))
549*700637cbSDimitry Andric     return false;
550*700637cbSDimitry Andric 
551*700637cbSDimitry Andric   IRBuilder<> Builder(Store);
552*700637cbSDimitry Andric   Value *WideEVL = Store->getArgOperand(3);
553*700637cbSDimitry Andric   // Conservatively check if EVL is a multiple of factor, otherwise some
554*700637cbSDimitry Andric   // (trailing) elements might be lost after the transformation.
555*700637cbSDimitry Andric   if (!isMultipleOfN(WideEVL, Store->getDataLayout(), Factor))
556*700637cbSDimitry Andric     return false;
557*700637cbSDimitry Andric 
558*700637cbSDimitry Andric   auto *PtrTy = Store->getArgOperand(1)->getType();
559*700637cbSDimitry Andric   auto *XLenTy = Type::getIntNTy(Store->getContext(), Subtarget.getXLen());
560*700637cbSDimitry Andric   Value *EVL = Builder.CreateZExt(
561*700637cbSDimitry Andric       Builder.CreateUDiv(WideEVL, ConstantInt::get(WideEVL->getType(), Factor)),
562*700637cbSDimitry Andric       XLenTy);
563*700637cbSDimitry Andric 
564*700637cbSDimitry Andric   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
565*700637cbSDimitry Andric     SmallVector<Value *, 8> Operands(InterleaveOperands);
566*700637cbSDimitry Andric     Operands.append({Store->getArgOperand(1), Mask, EVL});
567*700637cbSDimitry Andric     Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2],
568*700637cbSDimitry Andric                             {FVTy, PtrTy, XLenTy}, Operands);
569*700637cbSDimitry Andric     return true;
570*700637cbSDimitry Andric   }
571*700637cbSDimitry Andric 
572*700637cbSDimitry Andric   unsigned SEW = DL.getTypeSizeInBits(VTy->getElementType());
573*700637cbSDimitry Andric   unsigned NumElts = VTy->getElementCount().getKnownMinValue();
574*700637cbSDimitry Andric   Type *VecTupTy = TargetExtType::get(
575*700637cbSDimitry Andric       Store->getContext(), "riscv.vector.tuple",
576*700637cbSDimitry Andric       ScalableVectorType::get(Type::getInt8Ty(Store->getContext()),
577*700637cbSDimitry Andric                               NumElts * SEW / 8),
578*700637cbSDimitry Andric       Factor);
579*700637cbSDimitry Andric 
580*700637cbSDimitry Andric   Function *VecInsertFunc = Intrinsic::getOrInsertDeclaration(
581*700637cbSDimitry Andric       Store->getModule(), Intrinsic::riscv_tuple_insert, {VecTupTy, VTy});
582*700637cbSDimitry Andric   Value *StoredVal = PoisonValue::get(VecTupTy);
583*700637cbSDimitry Andric   for (unsigned i = 0; i < Factor; ++i)
584*700637cbSDimitry Andric     StoredVal = Builder.CreateCall(
585*700637cbSDimitry Andric         VecInsertFunc, {StoredVal, InterleaveOperands[i], Builder.getInt32(i)});
586*700637cbSDimitry Andric 
587*700637cbSDimitry Andric   Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
588*700637cbSDimitry Andric       Store->getModule(), ScalableVssegIntrIds[Factor - 2],
589*700637cbSDimitry Andric       {VecTupTy, PtrTy, Mask->getType(), EVL->getType()});
590*700637cbSDimitry Andric 
591*700637cbSDimitry Andric   Value *Operands[] = {StoredVal, Store->getArgOperand(1), Mask, EVL,
592*700637cbSDimitry Andric                        ConstantInt::get(XLenTy, Log2_64(SEW))};
593*700637cbSDimitry Andric 
594*700637cbSDimitry Andric   Builder.CreateCall(VssegNFunc, Operands);
595*700637cbSDimitry Andric   return true;
596*700637cbSDimitry Andric }
597