xref: /freebsd/contrib/llvm-project/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp (revision e64bea71c21eb42e97aa615188ba91f6cce0d36d)
1 //===-------- RISCV.cpp - Emit LLVM Code for builtins ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Builtin calls as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "clang/Basic/TargetBuiltins.h"
15 #include "llvm/IR/IntrinsicsRISCV.h"
16 #include "llvm/TargetParser/RISCVISAInfo.h"
17 #include "llvm/TargetParser/RISCVTargetParser.h"
18 
19 using namespace clang;
20 using namespace CodeGen;
21 using namespace llvm;
22 
23 // The 0th bit simulates the `vta` of RVV
24 // The 1st bit simulates the `vma` of RVV
25 static constexpr unsigned RVV_VTA = 0x1;
26 static constexpr unsigned RVV_VMA = 0x2;
27 
28 // RISC-V Vector builtin helper functions are marked NOINLINE to prevent
29 // excessive inlining in CodeGenFunction::EmitRISCVBuiltinExpr's large switch
30 // statement, which would significantly increase compilation time.
// Lowers the vleNffN (unit-stride fault-only-first load) builtins. The
// intrinsic returns a {vector, new_vl} aggregate; the vector is the call
// result and new_vl is stored through the pointer argument the builtin
// received.
static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVVLEFFBuiltin(CodeGenFunction *CGF, const CallExpr *E,
                    ReturnValueSlot ReturnValue, llvm::Type *ResultType,
                    Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
                    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  if (IsMasked) {
    // Move mask to right before vl.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // With both tail- and mask-agnostic policy the builtin carries no
    // maskedoff operand; supply poison as the passthru.
    if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
    IntrinsicTypes = {ResultType, Ops[4]->getType(), Ops[2]->getType()};
  } else {
    // Tail-agnostic builtins carry no passthru operand; use poison.
    if (PolicyAttrs & RVV_VTA)
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
    IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[1]->getType()};
  }
  // The new_vl output pointer is a builtin argument but not an intrinsic
  // operand; pull it out before emitting the call.
  Value *NewVL = Ops[2];
  Ops.erase(Ops.begin() + 2);
  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
  llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
  // Store new_vl.
  clang::CharUnits Align;
  if (IsMasked)
    Align = CGM.getNaturalPointeeTypeAlignment(
        E->getArg(E->getNumArgs() - 2)->getType());
  else
    Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
  llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
  Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
  return V;
}
67 
68 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVVSSEBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)69 emitRVVVSSEBuiltin(CodeGenFunction *CGF, const CallExpr *E,
70                    ReturnValueSlot ReturnValue, llvm::Type *ResultType,
71                    Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
72                    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
73   auto &Builder = CGF->Builder;
74   auto &CGM = CGF->CGM;
75   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
76   if (IsMasked) {
77     // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride,
78     // mask, vl)
79     std::swap(Ops[0], Ops[3]);
80   } else {
81     // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
82     std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
83   }
84   if (IsMasked)
85     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType()};
86   else
87     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
88   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
89   return Builder.CreateCall(F, Ops, "");
90 }
91 
emitRVVIndexedStoreBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)92 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedStoreBuiltin(
93     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
94     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
95     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
96   auto &Builder = CGF->Builder;
97   auto &CGM = CGF->CGM;
98   llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
99   if (IsMasked) {
100     // Builtin: (mask, ptr, index, value, vl).
101     // Intrinsic: (value, ptr, index, mask, vl)
102     std::swap(Ops[0], Ops[3]);
103   } else {
104     // Builtin: (ptr, index, value, vl).
105     // Intrinsic: (value, ptr, index, vl)
106     std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
107   }
108   if (IsMasked)
109     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
110                       Ops[4]->getType()};
111   else
112     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
113                       Ops[3]->getType()};
114   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
115   return Builder.CreateCall(F, Ops, "");
116 }
117 
// Lowers pseudo-unary builtins: the single source is paired with a zero
// scalar second operand and emitted through the underlying binary
// intrinsic ID.
static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVPseudoUnaryBuiltin(CodeGenFunction *CGF, const CallExpr *E,
                          ReturnValueSlot ReturnValue, llvm::Type *ResultType,
                          Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
                          int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  if (IsMasked) {
    // Move the mask to right before vl.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // Fully agnostic policy means no maskedoff operand; supply poison.
    if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  } else {
    // Tail-agnostic builtins carry no passthru operand; use poison.
    if (PolicyAttrs & RVV_VTA)
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  }
  // Synthesize the zero scalar used as the second source operand.
  auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
  Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
  if (IsMasked) {
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
    // maskedoff, op1, op2, mask, vl, policy
    IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
  } else {
    // passthru, op1, op2, vl
    IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
  }
  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops, "");
}
147 
// Lowers the pseudo vnot builtins: the source is paired with an all-ones
// scalar second operand and emitted through the underlying binary
// intrinsic (presumably a xor, which inverts every element -- confirm
// against the builtin table).
static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVPseudoVNotBuiltin(CodeGenFunction *CGF, const CallExpr *E,
                         ReturnValueSlot ReturnValue, llvm::Type *ResultType,
                         Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
                         int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  if (IsMasked) {
    // Move the mask to right before vl.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // Fully agnostic policy means no maskedoff operand; supply poison.
    if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  } else {
    // Tail-agnostic builtins carry no passthru operand; use poison.
    if (PolicyAttrs & RVV_VTA)
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  }
  // All-ones scalar becomes the second source operand.
  auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
  Ops.insert(Ops.begin() + 2, llvm::Constant::getAllOnesValue(ElemTy));
  if (IsMasked) {
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
    // maskedoff, op1, op2, mask, vl, policy
    IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
  } else {
    // passthru, op1, op2, vl
    IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
  }
  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops, "");
}
177 
178 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVPseudoMaskBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)179 emitRVVPseudoMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
180                          ReturnValueSlot ReturnValue, llvm::Type *ResultType,
181                          Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
182                          int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
183   auto &Builder = CGF->Builder;
184   auto &CGM = CGF->CGM;
185   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
186   // op1, vl
187   IntrinsicTypes = {ResultType, Ops[1]->getType()};
188   Ops.insert(Ops.begin() + 1, Ops[0]);
189   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
190   return Builder.CreateCall(F, Ops, "");
191 }
192 
// Lowers floating-point pseudo-unary builtins: the single vector source
// is duplicated as the second operand of the underlying binary intrinsic.
// The insert/rotate order here is position-sensitive; do not reorder.
static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVPseudoVFUnaryBuiltin(
    CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
    llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  if (IsMasked) {
    // Move the mask to right before vl.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // Fully agnostic policy means no maskedoff operand; supply poison.
    if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
    // Duplicate op1 as op2.
    Ops.insert(Ops.begin() + 2, Ops[1]);
    // The policy constant shares vl's integer type.
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
    // maskedoff, op1, op2, mask, vl
    IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
  } else {
    // Tail-agnostic builtins carry no passthru operand; use poison.
    if (PolicyAttrs & RVV_VTA)
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
    // op1, po2, vl
    // (Types are taken before the duplicate insert below shifts indices.)
    IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
    Ops.insert(Ops.begin() + 2, Ops[1]);
  }
  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops, "");
}
218 
// Lowers widening-convert pseudo builtins: the narrow source (Ops[1]) is
// paired with a zero scalar of its own element type and emitted through
// the underlying widening binary intrinsic.
static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVPseudoVWCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
                          ReturnValueSlot ReturnValue, llvm::Type *ResultType,
                          Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
                          int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
  if (IsMasked) {
    // Move the mask to right before vl.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // Fully agnostic policy means no maskedoff operand; supply poison.
    if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  } else {
    // Tail-agnostic builtins carry no passthru operand; use poison.
    if (PolicyAttrs & RVV_VTA)
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  }
  // Element type of the narrow source, not of the widened result.
  auto ElemTy = cast<llvm::VectorType>(Ops[1]->getType())->getElementType();
  Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
  if (IsMasked) {
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
    // maskedoff, op1, op2, mask, vl, policy
    IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[4]->getType()};
  } else {
    // passthru, op1, op2, vl
    IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[3]->getType()};
  }
  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops, "");
}
248 
// Lowers narrowing-convert pseudo builtins: the wide source is paired
// with a zero xlen-typed operand (the existing comments below label it
// "xlen") and emitted through the underlying narrowing binary intrinsic.
static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVPseudoVNCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
                          ReturnValueSlot ReturnValue, llvm::Type *ResultType,
                          Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
                          int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
  if (IsMasked) {
    // Move the mask to right before vl.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // Fully agnostic policy means no maskedoff operand; supply poison.
    if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  } else {
    // Tail-agnostic builtins carry no passthru operand; use poison.
    if (PolicyAttrs & RVV_VTA)
      Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
  }
  // Zero of vl's (xlen) type serves as the second operand.
  Ops.insert(Ops.begin() + 2,
             llvm::Constant::getNullValue(Ops.back()->getType()));
  if (IsMasked) {
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
    // maskedoff, op1, xlen, mask, vl
    IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType(),
                      Ops[4]->getType()};
  } else {
    // passthru, op1, xlen, vl
    IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType(),
                      Ops[3]->getType()};
  }
  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops, "");
}
280 
281 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVVlenbBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)282 emitRVVVlenbBuiltin(CodeGenFunction *CGF, const CallExpr *E,
283                     ReturnValueSlot ReturnValue, llvm::Type *ResultType,
284                     Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
285                     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
286   auto &Builder = CGF->Builder;
287   auto &CGM = CGF->CGM;
288   LLVMContext &Context = CGM.getLLVMContext();
289   llvm::MDBuilder MDHelper(Context);
290   llvm::Metadata *OpsMD[] = {llvm::MDString::get(Context, "vlenb")};
291   llvm::MDNode *RegName = llvm::MDNode::get(Context, OpsMD);
292   llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
293   llvm::Function *F =
294       CGM.getIntrinsic(llvm::Intrinsic::read_register, {CGF->SizeTy});
295   return Builder.CreateCall(F, Metadata);
296 }
297 
298 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVVsetvliBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)299 emitRVVVsetvliBuiltin(CodeGenFunction *CGF, const CallExpr *E,
300                       ReturnValueSlot ReturnValue, llvm::Type *ResultType,
301                       Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
302                       int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
303   auto &Builder = CGF->Builder;
304   auto &CGM = CGF->CGM;
305   llvm::Function *F = CGM.getIntrinsic(ID, {ResultType});
306   return Builder.CreateCall(F, Ops, "");
307 }
308 
309 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVVSEMaskBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)310 emitRVVVSEMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
311                       ReturnValueSlot ReturnValue, llvm::Type *ResultType,
312                       Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
313                       int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
314   auto &Builder = CGF->Builder;
315   auto &CGM = CGF->CGM;
316   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
317   if (IsMasked) {
318     // Builtin: (mask, ptr, value, vl).
319     // Intrinsic: (value, ptr, mask, vl)
320     std::swap(Ops[0], Ops[2]);
321   } else {
322     // Builtin: (ptr, value, vl).
323     // Intrinsic: (value, ptr, vl)
324     std::swap(Ops[0], Ops[1]);
325   }
326   if (IsMasked)
327     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
328   else
329     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
330   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
331   return Builder.CreateCall(F, Ops, "");
332 }
333 
emitRVVUnitStridedSegLoadTupleBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)334 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadTupleBuiltin(
335     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
336     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
337     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
338   auto &Builder = CGF->Builder;
339   auto &CGM = CGF->CGM;
340   llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
341   bool NoPassthru =
342       (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
343       (!IsMasked && (PolicyAttrs & RVV_VTA));
344   unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
345   if (IsMasked)
346     IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[0]->getType(),
347                       Ops.back()->getType()};
348   else
349     IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
350                       Ops.back()->getType()};
351   if (IsMasked)
352     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
353   if (NoPassthru)
354     Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
355   if (IsMasked)
356     Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
357   Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
358   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
359   llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
360   if (ReturnValue.isNull())
361     return LoadValue;
362   return Builder.CreateStore(LoadValue, ReturnValue.getValue());
363 }
364 
emitRVVUnitStridedSegStoreTupleBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)365 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegStoreTupleBuiltin(
366     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
367     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
368     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
369   auto &Builder = CGF->Builder;
370   auto &CGM = CGF->CGM;
371   llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
372   // Masked
373   // Builtin: (mask, ptr, v_tuple, vl)
374   // Intrinsic: (tuple, ptr, mask, vl, SegInstSEW)
375   // Unmasked
376   // Builtin: (ptr, v_tuple, vl)
377   // Intrinsic: (tuple, ptr, vl, SegInstSEW)
378   if (IsMasked)
379     std::swap(Ops[0], Ops[2]);
380   else
381     std::swap(Ops[0], Ops[1]);
382   Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
383   if (IsMasked)
384     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
385                       Ops[3]->getType()};
386   else
387     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
388   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
389   return Builder.CreateCall(F, Ops, "");
390 }
391 
emitRVVUnitStridedSegLoadFFTupleBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)392 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadFFTupleBuiltin(
393     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
394     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
395     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
396   auto &Builder = CGF->Builder;
397   auto &CGM = CGF->CGM;
398   llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
399   bool NoPassthru =
400       (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
401       (!IsMasked && (PolicyAttrs & RVV_VTA));
402   unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
403   if (IsMasked)
404     IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[Offset]->getType(),
405                       Ops[0]->getType()};
406   else
407     IntrinsicTypes = {ResultType, Ops.back()->getType(),
408                       Ops[Offset]->getType()};
409   if (IsMasked)
410     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
411   if (NoPassthru)
412     Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
413   if (IsMasked)
414     Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
415   Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
416   Value *NewVL = Ops[2];
417   Ops.erase(Ops.begin() + 2);
418   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
419   llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
420   // Get alignment from the new vl operand
421   clang::CharUnits Align =
422       CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
423   llvm::Value *ReturnTuple = Builder.CreateExtractValue(LoadValue, 0);
424   // Store new_vl
425   llvm::Value *V = Builder.CreateExtractValue(LoadValue, 1);
426   Builder.CreateStore(V, Address(NewVL, V->getType(), Align));
427   if (ReturnValue.isNull())
428     return ReturnTuple;
429   return Builder.CreateStore(ReturnTuple, ReturnValue.getValue());
430 }
431 
emitRVVStridedSegLoadTupleBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)432 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegLoadTupleBuiltin(
433     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
434     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
435     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
436   auto &Builder = CGF->Builder;
437   auto &CGM = CGF->CGM;
438   llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
439   bool NoPassthru =
440       (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
441       (!IsMasked && (PolicyAttrs & RVV_VTA));
442   unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
443   if (IsMasked)
444     IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops.back()->getType(),
445                       Ops[0]->getType()};
446   else
447     IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
448                       Ops.back()->getType()};
449   if (IsMasked)
450     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
451   if (NoPassthru)
452     Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
453   if (IsMasked)
454     Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
455   Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
456   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
457   llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
458   if (ReturnValue.isNull())
459     return LoadValue;
460   return Builder.CreateStore(LoadValue, ReturnValue.getValue());
461 }
462 
// Lowers strided segment-store tuple builtins: reorders the builtin
// operands into intrinsic order and appends the segment SEW constant.
static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegStoreTupleBuiltin(
    CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
    llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
  // Masked
  // Builtin: (mask, ptr, stride, v_tuple, vl)
  // Intrinsic: (tuple, ptr, stride, mask, vl, SegInstSEW)
  // Unmasked
  // Builtin: (ptr, stride, v_tuple, vl)
  // Intrinsic: (tuple, ptr, stride, vl, SegInstSEW)
  if (IsMasked)
    std::swap(Ops[0], Ops[3]);
  else
    std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
  // SegInstSEW uses the same integer type as vl (Ops.back()).
  Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
  // Overload on tuple, pointer, vl (and mask, when present) types.
  if (IsMasked)
    IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType(),
                      Ops[3]->getType()};
  else
    IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops, "");
}
489 
// Lowers the fixed-point averaging builtins, whose builtin operand list
// already ends with (vxrm, vl).
static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVAveragingBuiltin(CodeGenFunction *CGF, const CallExpr *E,
                        ReturnValueSlot ReturnValue, llvm::Type *ResultType,
                        Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
                        int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  // LLVM intrinsic
  // Unmasked: (passthru, op0, op1, round_mode, vl)
  // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
  // policy)

  // A maskedoff/passthru operand is present unless the policy is fully
  // agnostic (ta for unmasked, ta+ma for masked).
  bool HasMaskedOff =
      !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));

  // Move the mask to just before (vxrm, vl).
  if (IsMasked)
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);

  if (!HasMaskedOff)
    Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));

  if (IsMasked)
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));

  // Overload on result, op1 (vector or scalar) and vl/policy types.
  llvm::Function *F = CGM.getIntrinsic(
      ID, {ResultType, Ops[2]->getType(), Ops.back()->getType()});
  return Builder.CreateCall(F, Ops, "");
}
520 
// Lowers the narrowing-clip builtins, whose builtin operand list already
// ends with (vxrm, vl).
static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVNarrowingClipBuiltin(
    CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
    llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  // LLVM intrinsic
  // Unmasked: (passthru, op0, op1, round_mode, vl)
  // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
  // policy)

  // A maskedoff/passthru operand is present unless the policy is fully
  // agnostic (ta for unmasked, ta+ma for masked).
  bool HasMaskedOff =
      !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));

  // Move the mask to just before (vxrm, vl).
  if (IsMasked)
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);

  if (!HasMaskedOff)
    Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));

  if (IsMasked)
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));

  // Overload on result, wide op0, op1 (vector or scalar) and vl/policy types.
  llvm::Function *F =
      CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
                            Ops.back()->getType()});
  return Builder.CreateCall(F, Ops, "");
}
551 
// Lowers floating-point builtins that may carry an explicit frm
// (rounding-mode) operand; when absent, frm=7 (DYN: use the fcsr frm
// field) is inserted before vl.
static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingPointBuiltin(
    CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
    llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  // LLVM intrinsic
  // Unmasked: (passthru, op0, op1, round_mode, vl)
  // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)

  // A maskedoff/passthru operand is present unless the policy is fully
  // agnostic (ta for unmasked, ta+ma for masked).
  bool HasMaskedOff =
      !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));
  // Whether the builtin carries an explicit frm operand is deduced from
  // the operand count of each variant.
  bool HasRoundModeOp =
      IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
               : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);

  if (!HasRoundModeOp)
    Ops.insert(Ops.end() - 1,
               ConstantInt::get(Ops.back()->getType(), 7)); // frm

  // Move the mask to just before (frm, vl).
  if (IsMasked)
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);

  if (!HasMaskedOff)
    Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));

  if (IsMasked)
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));

  // Overload on result, op1 (vector or scalar) and vl/policy types.
  llvm::Function *F = CGM.getIntrinsic(
      ID, {ResultType, Ops[2]->getType(), Ops.back()->getType()});
  return Builder.CreateCall(F, Ops, "");
}
587 
// Lowers widening floating-point builtins that may carry an explicit frm
// (rounding-mode) operand; when absent, frm=7 (DYN: use the fcsr frm
// field) is inserted before vl.
static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVWideningFloatingPointBuiltin(
    CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
    llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  // LLVM intrinsic
  // Unmasked: (passthru, op0, op1, round_mode, vl)
  // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)

  // A maskedoff/passthru operand is present unless the policy is fully
  // agnostic (ta for unmasked, ta+ma for masked).
  bool HasMaskedOff =
      !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));
  // Whether the builtin carries an explicit frm operand is deduced from
  // the operand count of each variant.
  bool HasRoundModeOp =
      IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
               : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);

  if (!HasRoundModeOp)
    Ops.insert(Ops.end() - 1,
               ConstantInt::get(Ops.back()->getType(), 7)); // frm

  // Move the mask to just before (frm, vl).
  if (IsMasked)
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);

  if (!HasMaskedOff)
    Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));

  if (IsMasked)
    Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));

  // Overload on result, narrow op0, op1 (vector or scalar) and vl/policy
  // types (four overload parameters for the widening forms).
  llvm::Function *F =
      CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
                            Ops.back()->getType()});
  return Builder.CreateCall(F, Ops, "");
}
624 
emitRVVIndexedSegLoadTupleBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)625 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegLoadTupleBuiltin(
626     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
627     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
628     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
629   auto &Builder = CGF->Builder;
630   auto &CGM = CGF->CGM;
631   llvm::SmallVector<llvm::Type *, 5> IntrinsicTypes;
632 
633   bool NoPassthru =
634       (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
635       (!IsMasked && (PolicyAttrs & RVV_VTA));
636 
637   if (IsMasked)
638     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
639   if (NoPassthru)
640     Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
641 
642   if (IsMasked)
643     Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
644   Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
645 
646   if (IsMasked)
647     IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
648                       Ops[3]->getType(), Ops[4]->getType()};
649   else
650     IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
651                       Ops[3]->getType()};
652   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
653   llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
654 
655   if (ReturnValue.isNull())
656     return LoadValue;
657   return Builder.CreateStore(LoadValue, ReturnValue.getValue());
658 }
659 
emitRVVIndexedSegStoreTupleBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)660 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegStoreTupleBuiltin(
661     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
662     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
663     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
664   auto &Builder = CGF->Builder;
665   auto &CGM = CGF->CGM;
666   llvm::SmallVector<llvm::Type *, 5> IntrinsicTypes;
667   // Masked
668   // Builtin: (mask, ptr, index, v_tuple, vl)
669   // Intrinsic: (tuple, ptr, index, mask, vl, SegInstSEW)
670   // Unmasked
671   // Builtin: (ptr, index, v_tuple, vl)
672   // Intrinsic: (tuple, ptr, index, vl, SegInstSEW)
673 
674   if (IsMasked)
675     std::swap(Ops[0], Ops[3]);
676   else
677     std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
678 
679   Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
680 
681   if (IsMasked)
682     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
683                       Ops[3]->getType(), Ops[4]->getType()};
684   else
685     IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
686                       Ops[3]->getType()};
687   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
688   return Builder.CreateCall(F, Ops, "");
689 }
690 
691 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVFMABuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)692 emitRVVFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
693                   ReturnValueSlot ReturnValue, llvm::Type *ResultType,
694                   Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
695                   int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
696   auto &Builder = CGF->Builder;
697   auto &CGM = CGF->CGM;
698   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
699   // LLVM intrinsic
700   // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode,
701   //            vl, policy)
702   // Masked:   (vector_in, vector_in/scalar_in, vector_in, mask, frm,
703   //            vl, policy)
704 
705   bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
706 
707   if (!HasRoundModeOp)
708     Ops.insert(Ops.end() - 1,
709                ConstantInt::get(Ops.back()->getType(), 7)); // frm
710 
711   if (IsMasked)
712     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
713 
714   Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
715 
716   llvm::Function *F = CGM.getIntrinsic(
717       ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
718   return Builder.CreateCall(F, Ops, "");
719 }
720 
721 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVWideningFMABuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)722 emitRVVWideningFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
723                           ReturnValueSlot ReturnValue, llvm::Type *ResultType,
724                           Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
725                           int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
726   auto &Builder = CGF->Builder;
727   auto &CGM = CGF->CGM;
728   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
729   // LLVM intrinsic
730   // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode, vl,
731   // policy) Masked:   (vector_in, vector_in/scalar_in, vector_in, mask, frm,
732   // vl, policy)
733 
734   bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
735 
736   if (!HasRoundModeOp)
737     Ops.insert(Ops.end() - 1,
738                ConstantInt::get(Ops.back()->getType(), 7)); // frm
739 
740   if (IsMasked)
741     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 4);
742 
743   Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
744 
745   llvm::Function *F =
746       CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
747                             Ops.back()->getType()});
748   return Builder.CreateCall(F, Ops, "");
749 }
750 
emitRVVFloatingUnaryBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)751 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingUnaryBuiltin(
752     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
753     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
754     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
755   auto &Builder = CGF->Builder;
756   auto &CGM = CGF->CGM;
757   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
758   // LLVM intrinsic
759   // Unmasked: (passthru, op0, round_mode, vl)
760   // Masked:   (passthru, op0, mask, frm, vl, policy)
761 
762   bool HasMaskedOff =
763       !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
764         (!IsMasked && PolicyAttrs & RVV_VTA));
765   bool HasRoundModeOp =
766       IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
767                : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
768 
769   if (!HasRoundModeOp)
770     Ops.insert(Ops.end() - 1,
771                ConstantInt::get(Ops.back()->getType(), 7)); // frm
772 
773   if (IsMasked)
774     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
775 
776   if (!HasMaskedOff)
777     Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
778 
779   if (IsMasked)
780     Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
781 
782   IntrinsicTypes = {ResultType, Ops.back()->getType()};
783   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
784   return Builder.CreateCall(F, Ops, "");
785 }
786 
emitRVVFloatingConvBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)787 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingConvBuiltin(
788     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
789     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
790     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
791   auto &Builder = CGF->Builder;
792   auto &CGM = CGF->CGM;
793   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
794   // LLVM intrinsic
795   // Unmasked: (passthru, op0, frm, vl)
796   // Masked:   (passthru, op0, mask, frm, vl, policy)
797   bool HasMaskedOff =
798       !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
799         (!IsMasked && PolicyAttrs & RVV_VTA));
800   bool HasRoundModeOp =
801       IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
802                : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
803 
804   if (!HasRoundModeOp)
805     Ops.insert(Ops.end() - 1,
806                ConstantInt::get(Ops.back()->getType(), 7)); // frm
807 
808   if (IsMasked)
809     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
810 
811   if (!HasMaskedOff)
812     Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
813 
814   if (IsMasked)
815     Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
816 
817   llvm::Function *F = CGM.getIntrinsic(
818       ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
819   return Builder.CreateCall(F, Ops, "");
820 }
821 
emitRVVFloatingReductionBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)822 static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingReductionBuiltin(
823     CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
824     llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
825     int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
826   auto &Builder = CGF->Builder;
827   auto &CGM = CGF->CGM;
828   llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
829   // LLVM intrinsic
830   // Unmasked: (passthru, op0, op1, round_mode, vl)
831   // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
832 
833   bool HasMaskedOff =
834       !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
835         (!IsMasked && PolicyAttrs & RVV_VTA));
836   bool HasRoundModeOp =
837       IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
838                : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
839 
840   if (!HasRoundModeOp)
841     Ops.insert(Ops.end() - 1,
842                ConstantInt::get(Ops.back()->getType(), 7)); // frm
843 
844   if (IsMasked)
845     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
846 
847   if (!HasMaskedOff)
848     Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
849 
850   llvm::Function *F = CGM.getIntrinsic(
851       ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
852   return Builder.CreateCall(F, Ops, "");
853 }
854 
855 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVReinterpretBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)856 emitRVVReinterpretBuiltin(CodeGenFunction *CGF, const CallExpr *E,
857                           ReturnValueSlot ReturnValue, llvm::Type *ResultType,
858                           Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
859                           int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
860   auto &Builder = CGF->Builder;
861   auto &CGM = CGF->CGM;
862 
863   if (ResultType->isIntOrIntVectorTy(1) ||
864       Ops[0]->getType()->isIntOrIntVectorTy(1)) {
865     assert(isa<ScalableVectorType>(ResultType) &&
866            isa<ScalableVectorType>(Ops[0]->getType()));
867 
868     LLVMContext &Context = CGM.getLLVMContext();
869     ScalableVectorType *Boolean64Ty =
870         ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);
871 
872     if (ResultType->isIntOrIntVectorTy(1)) {
873       // Casting from m1 vector integer -> vector boolean
874       // Ex: <vscale x 8 x i8>
875       //     --(bitcast)--------> <vscale x 64 x i1>
876       //     --(vector_extract)-> <vscale x  8 x i1>
877       llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
878       return Builder.CreateExtractVector(ResultType, BitCast,
879                                          ConstantInt::get(CGF->Int64Ty, 0));
880     } else {
881       // Casting from vector boolean -> m1 vector integer
882       // Ex: <vscale x  1 x i1>
883       //       --(vector_insert)-> <vscale x 64 x i1>
884       //       --(bitcast)-------> <vscale x  8 x i8>
885       llvm::Value *Boolean64Val = Builder.CreateInsertVector(
886           Boolean64Ty, llvm::PoisonValue::get(Boolean64Ty), Ops[0],
887           ConstantInt::get(CGF->Int64Ty, 0));
888       return Builder.CreateBitCast(Boolean64Val, ResultType);
889     }
890   }
891   return Builder.CreateBitCast(Ops[0], ResultType);
892 }
893 
894 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVGetBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)895 emitRVVGetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
896                   ReturnValueSlot ReturnValue, llvm::Type *ResultType,
897                   Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
898                   int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
899   auto &Builder = CGF->Builder;
900   auto *VecTy = cast<ScalableVectorType>(ResultType);
901   if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
902     unsigned MaxIndex =
903         OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
904     assert(isPowerOf2_32(MaxIndex));
905     // Mask to only valid indices.
906     Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
907     Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
908     Ops[1] =
909         Builder.CreateMul(Ops[1], ConstantInt::get(Ops[1]->getType(),
910                                                    VecTy->getMinNumElements()));
911     return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
912   }
913 
914   return Builder.CreateIntrinsic(
915       Intrinsic::riscv_tuple_extract, {ResultType, Ops[0]->getType()},
916       {Ops[0], Builder.CreateTrunc(Ops[1], Builder.getInt32Ty())});
917 }
918 
919 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVSetBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)920 emitRVVSetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
921                   ReturnValueSlot ReturnValue, llvm::Type *ResultType,
922                   Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
923                   int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
924   auto &Builder = CGF->Builder;
925   if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
926     auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
927     unsigned MaxIndex =
928         ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
929     assert(isPowerOf2_32(MaxIndex));
930     // Mask to only valid indices.
931     Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
932     Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
933     Ops[1] =
934         Builder.CreateMul(Ops[1], ConstantInt::get(Ops[1]->getType(),
935                                                    VecTy->getMinNumElements()));
936     return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
937   }
938 
939   return Builder.CreateIntrinsic(
940       Intrinsic::riscv_tuple_insert, {ResultType, Ops[2]->getType()},
941       {Ops[0], Ops[2], Builder.CreateTrunc(Ops[1], Builder.getInt32Ty())});
942 }
943 
944 static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVCreateBuiltin(CodeGenFunction * CGF,const CallExpr * E,ReturnValueSlot ReturnValue,llvm::Type * ResultType,Intrinsic::ID ID,SmallVectorImpl<Value * > & Ops,int PolicyAttrs,bool IsMasked,unsigned SegInstSEW)945 emitRVVCreateBuiltin(CodeGenFunction *CGF, const CallExpr *E,
946                      ReturnValueSlot ReturnValue, llvm::Type *ResultType,
947                      Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
948                      int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
949   auto &Builder = CGF->Builder;
950   llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
951   auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
952   for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
953     if (isa<ScalableVectorType>(ResultType)) {
954       llvm::Value *Idx = ConstantInt::get(Builder.getInt64Ty(),
955                                           VecTy->getMinNumElements() * I);
956       ReturnVector =
957           Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
958     } else {
959       llvm::Value *Idx = ConstantInt::get(Builder.getInt32Ty(), I);
960       ReturnVector = Builder.CreateIntrinsic(Intrinsic::riscv_tuple_insert,
961                                              {ResultType, Ops[I]->getType()},
962                                              {ReturnVector, Ops[I], Idx});
963     }
964   }
965   return ReturnVector;
966 }
967 
EmitRISCVCpuInit()968 Value *CodeGenFunction::EmitRISCVCpuInit() {
969   llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {VoidPtrTy}, false);
970   llvm::FunctionCallee Func =
971       CGM.CreateRuntimeFunction(FTy, "__init_riscv_feature_bits");
972   auto *CalleeGV = cast<llvm::GlobalValue>(Func.getCallee());
973   CalleeGV->setDSOLocal(true);
974   CalleeGV->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
975   return Builder.CreateCall(Func, {llvm::ConstantPointerNull::get(VoidPtrTy)});
976 }
977 
EmitRISCVCpuSupports(const CallExpr * E)978 Value *CodeGenFunction::EmitRISCVCpuSupports(const CallExpr *E) {
979 
980   const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
981   StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
982   if (!getContext().getTargetInfo().validateCpuSupports(FeatureStr))
983     return Builder.getFalse();
984 
985   return EmitRISCVCpuSupports(ArrayRef<StringRef>(FeatureStr));
986 }
987 
loadRISCVFeatureBits(unsigned Index,CGBuilderTy & Builder,CodeGenModule & CGM)988 static Value *loadRISCVFeatureBits(unsigned Index, CGBuilderTy &Builder,
989                                    CodeGenModule &CGM) {
990   llvm::Type *Int32Ty = Builder.getInt32Ty();
991   llvm::Type *Int64Ty = Builder.getInt64Ty();
992   llvm::ArrayType *ArrayOfInt64Ty =
993       llvm::ArrayType::get(Int64Ty, llvm::RISCVISAInfo::FeatureBitSize);
994   llvm::Type *StructTy = llvm::StructType::get(Int32Ty, ArrayOfInt64Ty);
995   llvm::Constant *RISCVFeaturesBits =
996       CGM.CreateRuntimeVariable(StructTy, "__riscv_feature_bits");
997   cast<llvm::GlobalValue>(RISCVFeaturesBits)->setDSOLocal(true);
998   Value *IndexVal = llvm::ConstantInt::get(Int32Ty, Index);
999   llvm::Value *GEPIndices[] = {Builder.getInt32(0), Builder.getInt32(1),
1000                                IndexVal};
1001   Value *Ptr =
1002       Builder.CreateInBoundsGEP(StructTy, RISCVFeaturesBits, GEPIndices);
1003   Value *FeaturesBit =
1004       Builder.CreateAlignedLoad(Int64Ty, Ptr, CharUnits::fromQuantity(8));
1005   return FeaturesBit;
1006 }
1007 
EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs)1008 Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) {
1009   const unsigned RISCVFeatureLength = llvm::RISCVISAInfo::FeatureBitSize;
1010   uint64_t RequireBitMasks[RISCVFeatureLength] = {0};
1011 
1012   for (auto Feat : FeaturesStrs) {
1013     auto [GroupID, BitPos] = RISCVISAInfo::getRISCVFeaturesBitsInfo(Feat);
1014 
1015     // If there isn't BitPos for this feature, skip this version.
1016     // It also report the warning to user during compilation.
1017     if (BitPos == -1)
1018       return Builder.getFalse();
1019 
1020     RequireBitMasks[GroupID] |= (1ULL << BitPos);
1021   }
1022 
1023   Value *Result = nullptr;
1024   for (unsigned Idx = 0; Idx < RISCVFeatureLength; Idx++) {
1025     if (RequireBitMasks[Idx] == 0)
1026       continue;
1027 
1028     Value *Mask = Builder.getInt64(RequireBitMasks[Idx]);
1029     Value *Bitset =
1030         Builder.CreateAnd(loadRISCVFeatureBits(Idx, Builder, CGM), Mask);
1031     Value *CmpV = Builder.CreateICmpEQ(Bitset, Mask);
1032     Result = (!Result) ? CmpV : Builder.CreateAnd(Result, CmpV);
1033   }
1034 
1035   assert(Result && "Should have value here.");
1036 
1037   return Result;
1038 }
1039 
EmitRISCVCpuIs(const CallExpr * E)1040 Value *CodeGenFunction::EmitRISCVCpuIs(const CallExpr *E) {
1041   const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
1042   StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
1043   return EmitRISCVCpuIs(CPUStr);
1044 }
1045 
EmitRISCVCpuIs(StringRef CPUStr)1046 Value *CodeGenFunction::EmitRISCVCpuIs(StringRef CPUStr) {
1047   llvm::Type *Int32Ty = Builder.getInt32Ty();
1048   llvm::Type *Int64Ty = Builder.getInt64Ty();
1049   llvm::StructType *StructTy = llvm::StructType::get(Int32Ty, Int64Ty, Int64Ty);
1050   llvm::Constant *RISCVCPUModel =
1051       CGM.CreateRuntimeVariable(StructTy, "__riscv_cpu_model");
1052   cast<llvm::GlobalValue>(RISCVCPUModel)->setDSOLocal(true);
1053 
1054   auto loadRISCVCPUID = [&](unsigned Index) {
1055     Value *Ptr = Builder.CreateStructGEP(StructTy, RISCVCPUModel, Index);
1056     Value *CPUID = Builder.CreateAlignedLoad(StructTy->getTypeAtIndex(Index),
1057                                              Ptr, llvm::MaybeAlign());
1058     return CPUID;
1059   };
1060 
1061   const llvm::RISCV::CPUModel Model = llvm::RISCV::getCPUModel(CPUStr);
1062 
1063   // Compare mvendorid.
1064   Value *VendorID = loadRISCVCPUID(0);
1065   Value *Result =
1066       Builder.CreateICmpEQ(VendorID, Builder.getInt32(Model.MVendorID));
1067 
1068   // Compare marchid.
1069   Value *ArchID = loadRISCVCPUID(1);
1070   Result = Builder.CreateAnd(
1071       Result, Builder.CreateICmpEQ(ArchID, Builder.getInt64(Model.MArchID)));
1072 
1073   // Compare mimpid.
1074   Value *ImpID = loadRISCVCPUID(2);
1075   Result = Builder.CreateAnd(
1076       Result, Builder.CreateICmpEQ(ImpID, Builder.getInt64(Model.MImpID)));
1077 
1078   return Result;
1079 }
1080 
EmitRISCVBuiltinExpr(unsigned BuiltinID,const CallExpr * E,ReturnValueSlot ReturnValue)1081 Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
1082                                              const CallExpr *E,
1083                                              ReturnValueSlot ReturnValue) {
1084 
1085   if (BuiltinID == Builtin::BI__builtin_cpu_supports)
1086     return EmitRISCVCpuSupports(E);
1087   if (BuiltinID == Builtin::BI__builtin_cpu_init)
1088     return EmitRISCVCpuInit();
1089   if (BuiltinID == Builtin::BI__builtin_cpu_is)
1090     return EmitRISCVCpuIs(E);
1091 
1092   SmallVector<Value *, 4> Ops;
1093   llvm::Type *ResultType = ConvertType(E->getType());
1094 
1095   // Find out if any arguments are required to be integer constant expressions.
1096   unsigned ICEArguments = 0;
1097   ASTContext::GetBuiltinTypeError Error;
1098   getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
1099   if (Error == ASTContext::GE_Missing_type) {
1100     // Vector intrinsics don't have a type string.
1101     assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
1102            BuiltinID <= clang::RISCV::LastRVVBuiltin);
1103     ICEArguments = 0;
1104     if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
1105         BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
1106       ICEArguments = 1 << 1;
1107   } else {
1108     assert(Error == ASTContext::GE_None && "Unexpected error");
1109   }
1110 
1111   if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
1112     ICEArguments |= (1 << 1);
1113   if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
1114     ICEArguments |= (1 << 2);
1115 
1116   for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
1117     // Handle aggregate argument, namely RVV tuple types in segment load/store
1118     if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
1119       LValue L = EmitAggExprToLValue(E->getArg(i));
1120       llvm::Value *AggValue = Builder.CreateLoad(L.getAddress());
1121       Ops.push_back(AggValue);
1122       continue;
1123     }
1124     Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
1125   }
1126 
1127   Intrinsic::ID ID = Intrinsic::not_intrinsic;
1128   int PolicyAttrs = 0;
1129   bool IsMasked = false;
1130   // This is used by segment load/store to determine it's llvm type.
1131   unsigned SegInstSEW = 8;
1132 
1133   // Required for overloaded intrinsics.
1134   llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
1135   switch (BuiltinID) {
1136   default: llvm_unreachable("unexpected builtin ID");
1137   case RISCV::BI__builtin_riscv_orc_b_32:
1138   case RISCV::BI__builtin_riscv_orc_b_64:
1139   case RISCV::BI__builtin_riscv_clmul_32:
1140   case RISCV::BI__builtin_riscv_clmul_64:
1141   case RISCV::BI__builtin_riscv_clmulh_32:
1142   case RISCV::BI__builtin_riscv_clmulh_64:
1143   case RISCV::BI__builtin_riscv_clmulr_32:
1144   case RISCV::BI__builtin_riscv_clmulr_64:
1145   case RISCV::BI__builtin_riscv_xperm4_32:
1146   case RISCV::BI__builtin_riscv_xperm4_64:
1147   case RISCV::BI__builtin_riscv_xperm8_32:
1148   case RISCV::BI__builtin_riscv_xperm8_64:
1149   case RISCV::BI__builtin_riscv_brev8_32:
1150   case RISCV::BI__builtin_riscv_brev8_64:
1151   case RISCV::BI__builtin_riscv_zip_32:
1152   case RISCV::BI__builtin_riscv_unzip_32: {
1153     switch (BuiltinID) {
1154     default: llvm_unreachable("unexpected builtin ID");
1155     // Zbb
1156     case RISCV::BI__builtin_riscv_orc_b_32:
1157     case RISCV::BI__builtin_riscv_orc_b_64:
1158       ID = Intrinsic::riscv_orc_b;
1159       break;
1160 
1161     // Zbc
1162     case RISCV::BI__builtin_riscv_clmul_32:
1163     case RISCV::BI__builtin_riscv_clmul_64:
1164       ID = Intrinsic::riscv_clmul;
1165       break;
1166     case RISCV::BI__builtin_riscv_clmulh_32:
1167     case RISCV::BI__builtin_riscv_clmulh_64:
1168       ID = Intrinsic::riscv_clmulh;
1169       break;
1170     case RISCV::BI__builtin_riscv_clmulr_32:
1171     case RISCV::BI__builtin_riscv_clmulr_64:
1172       ID = Intrinsic::riscv_clmulr;
1173       break;
1174 
1175     // Zbkx
1176     case RISCV::BI__builtin_riscv_xperm8_32:
1177     case RISCV::BI__builtin_riscv_xperm8_64:
1178       ID = Intrinsic::riscv_xperm8;
1179       break;
1180     case RISCV::BI__builtin_riscv_xperm4_32:
1181     case RISCV::BI__builtin_riscv_xperm4_64:
1182       ID = Intrinsic::riscv_xperm4;
1183       break;
1184 
1185     // Zbkb
1186     case RISCV::BI__builtin_riscv_brev8_32:
1187     case RISCV::BI__builtin_riscv_brev8_64:
1188       ID = Intrinsic::riscv_brev8;
1189       break;
1190     case RISCV::BI__builtin_riscv_zip_32:
1191       ID = Intrinsic::riscv_zip;
1192       break;
1193     case RISCV::BI__builtin_riscv_unzip_32:
1194       ID = Intrinsic::riscv_unzip;
1195       break;
1196     }
1197 
1198     IntrinsicTypes = {ResultType};
1199     break;
1200   }
1201 
1202   // Zk builtins
1203 
1204   // Zknh
1205   case RISCV::BI__builtin_riscv_sha256sig0:
1206     ID = Intrinsic::riscv_sha256sig0;
1207     break;
1208   case RISCV::BI__builtin_riscv_sha256sig1:
1209     ID = Intrinsic::riscv_sha256sig1;
1210     break;
1211   case RISCV::BI__builtin_riscv_sha256sum0:
1212     ID = Intrinsic::riscv_sha256sum0;
1213     break;
1214   case RISCV::BI__builtin_riscv_sha256sum1:
1215     ID = Intrinsic::riscv_sha256sum1;
1216     break;
1217 
1218   // Zksed
1219   case RISCV::BI__builtin_riscv_sm4ks:
1220     ID = Intrinsic::riscv_sm4ks;
1221     break;
1222   case RISCV::BI__builtin_riscv_sm4ed:
1223     ID = Intrinsic::riscv_sm4ed;
1224     break;
1225 
1226   // Zksh
1227   case RISCV::BI__builtin_riscv_sm3p0:
1228     ID = Intrinsic::riscv_sm3p0;
1229     break;
1230   case RISCV::BI__builtin_riscv_sm3p1:
1231     ID = Intrinsic::riscv_sm3p1;
1232     break;
1233 
1234   case RISCV::BI__builtin_riscv_clz_32:
1235   case RISCV::BI__builtin_riscv_clz_64: {
1236     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
1237     Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
1238     if (Result->getType() != ResultType)
1239       Result =
1240           Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
1241     return Result;
1242   }
1243   case RISCV::BI__builtin_riscv_ctz_32:
1244   case RISCV::BI__builtin_riscv_ctz_64: {
1245     Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
1246     Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
1247     if (Result->getType() != ResultType)
1248       Result =
1249           Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
1250     return Result;
1251   }
1252 
1253   // Zihintntl
1254   case RISCV::BI__builtin_riscv_ntl_load: {
1255     llvm::Type *ResTy = ConvertType(E->getType());
1256     unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
1257     if (Ops.size() == 2)
1258       DomainVal = cast<ConstantInt>(Ops[1])->getZExtValue();
1259 
1260     llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
1261         getLLVMContext(),
1262         llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
1263     llvm::MDNode *NontemporalNode = llvm::MDNode::get(
1264         getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1265 
1266     int Width;
1267     if(ResTy->isScalableTy()) {
1268       const ScalableVectorType *SVTy = cast<ScalableVectorType>(ResTy);
1269       llvm::Type *ScalarTy = ResTy->getScalarType();
1270       Width = ScalarTy->getPrimitiveSizeInBits() *
1271               SVTy->getElementCount().getKnownMinValue();
1272     } else
1273       Width = ResTy->getPrimitiveSizeInBits();
1274     LoadInst *Load = Builder.CreateLoad(
1275         Address(Ops[0], ResTy, CharUnits::fromQuantity(Width / 8)));
1276 
1277     Load->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
1278     Load->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
1279                       RISCVDomainNode);
1280 
1281     return Load;
1282   }
1283   case RISCV::BI__builtin_riscv_ntl_store: {
1284     unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
1285     if (Ops.size() == 3)
1286       DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue();
1287 
1288     llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
1289         getLLVMContext(),
1290         llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
1291     llvm::MDNode *NontemporalNode = llvm::MDNode::get(
1292         getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1293 
1294     StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
1295     Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
1296     Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
1297                        RISCVDomainNode);
1298 
1299     return Store;
1300   }
1301   // Zihintpause
1302   case RISCV::BI__builtin_riscv_pause: {
1303     llvm::Function *Fn = CGM.getIntrinsic(llvm::Intrinsic::riscv_pause);
1304     return Builder.CreateCall(Fn, {});
1305   }
1306 
1307   // XCValu
1308   case RISCV::BI__builtin_riscv_cv_alu_addN:
1309     ID = Intrinsic::riscv_cv_alu_addN;
1310     break;
1311   case RISCV::BI__builtin_riscv_cv_alu_addRN:
1312     ID = Intrinsic::riscv_cv_alu_addRN;
1313     break;
1314   case RISCV::BI__builtin_riscv_cv_alu_adduN:
1315     ID = Intrinsic::riscv_cv_alu_adduN;
1316     break;
1317   case RISCV::BI__builtin_riscv_cv_alu_adduRN:
1318     ID = Intrinsic::riscv_cv_alu_adduRN;
1319     break;
1320   case RISCV::BI__builtin_riscv_cv_alu_clip:
1321     ID = Intrinsic::riscv_cv_alu_clip;
1322     break;
1323   case RISCV::BI__builtin_riscv_cv_alu_clipu:
1324     ID = Intrinsic::riscv_cv_alu_clipu;
1325     break;
1326   case RISCV::BI__builtin_riscv_cv_alu_extbs:
1327     return Builder.CreateSExt(Builder.CreateTrunc(Ops[0], Int8Ty), Int32Ty,
1328                               "extbs");
1329   case RISCV::BI__builtin_riscv_cv_alu_extbz:
1330     return Builder.CreateZExt(Builder.CreateTrunc(Ops[0], Int8Ty), Int32Ty,
1331                               "extbz");
1332   case RISCV::BI__builtin_riscv_cv_alu_exths:
1333     return Builder.CreateSExt(Builder.CreateTrunc(Ops[0], Int16Ty), Int32Ty,
1334                               "exths");
1335   case RISCV::BI__builtin_riscv_cv_alu_exthz:
1336     return Builder.CreateZExt(Builder.CreateTrunc(Ops[0], Int16Ty), Int32Ty,
1337                               "exthz");
1338   case RISCV::BI__builtin_riscv_cv_alu_sle:
1339     return Builder.CreateZExt(Builder.CreateICmpSLE(Ops[0], Ops[1]), Int32Ty,
1340                               "sle");
1341   case RISCV::BI__builtin_riscv_cv_alu_sleu:
1342     return Builder.CreateZExt(Builder.CreateICmpULE(Ops[0], Ops[1]), Int32Ty,
1343                               "sleu");
1344   case RISCV::BI__builtin_riscv_cv_alu_subN:
1345     ID = Intrinsic::riscv_cv_alu_subN;
1346     break;
1347   case RISCV::BI__builtin_riscv_cv_alu_subRN:
1348     ID = Intrinsic::riscv_cv_alu_subRN;
1349     break;
1350   case RISCV::BI__builtin_riscv_cv_alu_subuN:
1351     ID = Intrinsic::riscv_cv_alu_subuN;
1352     break;
1353   case RISCV::BI__builtin_riscv_cv_alu_subuRN:
1354     ID = Intrinsic::riscv_cv_alu_subuRN;
1355     break;
1356 
1357     // Vector builtins are handled from here.
1358 #include "clang/Basic/riscv_vector_builtin_cg.inc"
1359 
1360     // SiFive Vector builtins are handled from here.
1361 #include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
1362 
1363     // Andes Vector builtins are handled from here.
1364 #include "clang/Basic/riscv_andes_vector_builtin_cg.inc"
1365   }
1366 
1367   assert(ID != Intrinsic::not_intrinsic);
1368 
1369   llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1370   return Builder.CreateCall(F, Ops, "");
1371 }
1372