xref: /freebsd/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.
25
let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // These intrinsics are intended only for internal compiler use (i.e. as
  // part of AtomicExpandPass via the emitMaskedAtomic*Intrinsic hooks). Their
  // names and semantics could change in the future.

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
  //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"
76
//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension
79
let TargetPrefix = "riscv" in {

  // Unary GPR op: result type matches the single operand (T -> T).
  class BitManipGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;
  // Binary GPR op: both operands and the result share one type ((T, T) -> T).
  class BitManipGPRGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip   : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
} // TargetPrefix = "riscv"
110
//===----------------------------------------------------------------------===//
// May-Be-Operations
113
let TargetPrefix = "riscv" in {

  // Zimop
  // Single-source may-be-operation; the trailing operand is an immediate
  // (ImmArg) selector.
  def int_riscv_mopr
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
  // Two-source may-be-operation; the trailing operand is an immediate
  // (ImmArg) selector.
  def int_riscv_moprr
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
} // TargetPrefix = "riscv"
126
//===----------------------------------------------------------------------===//
// Vectors
129
// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

// Base record carrying per-intrinsic metadata consumed by the RVV backend
// tables: which operand (if any) is a scalar that may need extension, and
// which operand is the VL.
class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}
144
let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
  def int_riscv_vsetvli   : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;
160
161  // For unit stride mask load
162  // Input: (pointer, vl)
163  class RISCVUSMLoad
164        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
165                    [llvm_ptr_ty, llvm_anyint_ty],
166                    [NoCapture<ArgIndex<0>>, IntrReadMem, IntrArgMemOnly]>,
167          RISCVVIntrinsic {
168    let VLOperand = 1;
169  }
170  // For unit stride load
171  // Input: (passthru, pointer, vl)
172  class RISCVUSLoad
173        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
174                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
175                    [NoCapture<ArgIndex<1>>, IntrReadMem, IntrArgMemOnly]>,
176          RISCVVIntrinsic {
177    let VLOperand = 2;
178  }
179  // For unit stride fault-only-first load
180  // Input: (passthru, pointer, vl)
181  // Output: (data, vl)
182  // NOTE: We model this with default memory properties since we model writing
183  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
184  class RISCVUSLoadFF
185        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
186                    [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
187                    [NoCapture<ArgIndex<1>>]>,
188                    RISCVVIntrinsic {
189    let VLOperand = 2;
190  }
191  // For unit stride load with mask
192  // Input: (maskedoff, pointer, mask, vl, policy)
193  class RISCVUSLoadMasked
194        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
195                    [LLVMMatchType<0>, llvm_ptr_ty,
196                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
197                     llvm_anyint_ty, LLVMMatchType<1>],
198                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem,
199                     IntrArgMemOnly]>,
200                    RISCVVIntrinsic {
201    let VLOperand = 3;
202  }
203  // For unit stride fault-only-first load with mask
204  // Input: (maskedoff, pointer, mask, vl, policy)
205  // Output: (data, vl)
206  // NOTE: We model this with default memory properties since we model writing
207  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
208  class RISCVUSLoadFFMasked
209        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
210                    [LLVMMatchType<0>, llvm_ptr_ty,
211                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
212                     LLVMMatchType<1>, LLVMMatchType<1>],
213                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
214    let VLOperand = 3;
215  }
216  // For strided load with passthru operand
217  // Input: (passthru, pointer, stride, vl)
218  class RISCVSLoad
219        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
220                    [LLVMMatchType<0>, llvm_ptr_ty,
221                     llvm_anyint_ty, LLVMMatchType<1>],
222                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
223    let VLOperand = 3;
224  }
225  // For strided load with mask
226  // Input: (maskedoff, pointer, stride, mask, vl, policy)
227  class RISCVSLoadMasked
228        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
229                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
230                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
231                     LLVMMatchType<1>],
232                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
233                    RISCVVIntrinsic {
234    let VLOperand = 4;
235  }
236  // For indexed load with passthru operand
237  // Input: (passthru, pointer, index, vl)
238  class RISCVILoad
239        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
240                    [LLVMMatchType<0>, llvm_ptr_ty,
241                     llvm_anyvector_ty, llvm_anyint_ty],
242                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
243    let VLOperand = 3;
244  }
245  // For indexed load with mask
246  // Input: (maskedoff, pointer, index, mask, vl, policy)
247  class RISCVILoadMasked
248        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
249                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
250                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
251                     LLVMMatchType<2>],
252                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
253                    RISCVVIntrinsic {
254    let VLOperand = 4;
255  }
256  // For unit stride store
257  // Input: (vector_in, pointer, vl)
258  class RISCVUSStore
259        : DefaultAttrsIntrinsic<[],
260                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
261                    [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
262          RISCVVIntrinsic {
263    let VLOperand = 2;
264  }
265  // For unit stride store with mask
266  // Input: (vector_in, pointer, mask, vl)
267  class RISCVUSStoreMasked
268        : DefaultAttrsIntrinsic<[],
269                    [llvm_anyvector_ty, llvm_ptr_ty,
270                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
271                     llvm_anyint_ty],
272                    [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
273          RISCVVIntrinsic {
274    let VLOperand = 3;
275  }
276  // For strided store
277  // Input: (vector_in, pointer, stride, vl)
278  class RISCVSStore
279        : DefaultAttrsIntrinsic<[],
280                    [llvm_anyvector_ty, llvm_ptr_ty,
281                     llvm_anyint_ty, LLVMMatchType<1>],
282                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
283    let VLOperand = 3;
284  }
285  // For stride store with mask
286  // Input: (vector_in, pointer, stirde, mask, vl)
287  class RISCVSStoreMasked
288        : DefaultAttrsIntrinsic<[],
289                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
290                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
291                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
292    let VLOperand = 4;
293  }
294  // For indexed store
295  // Input: (vector_in, pointer, index, vl)
296  class RISCVIStore
297        : DefaultAttrsIntrinsic<[],
298                    [llvm_anyvector_ty, llvm_ptr_ty,
299                     llvm_anyint_ty, llvm_anyint_ty],
300                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
301    let VLOperand = 3;
302  }
303  // For indexed store with mask
304  // Input: (vector_in, pointer, index, mask, vl)
305  class RISCVIStoreMasked
306        : DefaultAttrsIntrinsic<[],
307                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
308                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
309                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
310    let VLOperand = 4;
311  }
312  // For destination vector type is the same as source vector.
313  // Input: (passthru, vector_in, vl)
314  class RISCVUnaryAAUnMasked
315        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
316                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
317                    [IntrNoMem]>, RISCVVIntrinsic {
318    let VLOperand = 2;
319  }
320  // For destination vector type is the same as the source vector type
321  // Input: (passthru, vector_in, vl, policy)
322  class RISCVUnaryAAUnMaskedZvk<bit IsVS>
323        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
324                    [LLVMMatchType<0>, !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>),
325                     llvm_anyint_ty, !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)],
326                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
327    let VLOperand = 2;
328  }
329
330  multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = 1, bit HasVS = 1> {
331    if HasVV then
332      def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk<IsVS=0>;
333
334    if HasVS then
335      def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk<IsVS=1>;
336  }
337  // For destination vector type is the same as first source vector (with mask).
338  // Input: (vector_in, vector_in, mask, vl, policy)
339  class RISCVUnaryAAMasked
340        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
341                    [LLVMMatchType<0>, LLVMMatchType<0>,
342                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
343                     LLVMMatchType<1>],
344                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
345    let VLOperand = 3;
346  }
347  // For destination vector type is the same as source vector.
348  // Input: (passthru, vector_in, frm, vl)
349  class RISCVUnaryAAUnMaskedRoundingMode
350        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
351                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
352                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
353    let VLOperand = 3;
354  }
355  // For destination vector type is the same as first source vector (with mask).
356  // Input: (vector_in, vector_in, mask, frm, vl, policy)
357  class RISCVUnaryAAMaskedRoundingMode
358        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
359                    [LLVMMatchType<0>, LLVMMatchType<0>,
360                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
361                     LLVMMatchType<1>, LLVMMatchType<1>],
362                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
363    let VLOperand = 4;
364  }
365  // Input: (passthru, vector_in, vector_in, mask, vl)
366  class RISCVCompress
367        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
368                    [LLVMMatchType<0>, LLVMMatchType<0>,
369                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
370                    [IntrNoMem]>, RISCVVIntrinsic {
371    let VLOperand = 3;
372  }
373  // For destination vector type is the same as first and second source vector.
374  // Input: (vector_in, vector_in, vl)
375  class RISCVBinaryAAAUnMasked
376        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
377                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
378                    [IntrNoMem]>, RISCVVIntrinsic {
379    let VLOperand = 2;
380  }
381  // For destination vector type is the same as first and second source vector.
382  // Input: (passthru, vector_in, int_vector_in, vl)
383  class RISCVRGatherVVUnMasked
384        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
385                    [LLVMMatchType<0>, LLVMMatchType<0>,
386                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
387                    [IntrNoMem]>, RISCVVIntrinsic {
388    let VLOperand = 3;
389  }
390  // For destination vector type is the same as first and second source vector.
391  // Input: (vector_in, vector_in, int_vector_in, vl, policy)
392  class RISCVRGatherVVMasked
393        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
394                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
395                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
396                     LLVMMatchType<1>],
397                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
398    let VLOperand = 4;
399  }
400  // Input: (passthru, vector_in, int16_vector_in, vl)
401  class RISCVRGatherEI16VVUnMasked
402        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
403                    [LLVMMatchType<0>, LLVMMatchType<0>,
404                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
405                     llvm_anyint_ty],
406                    [IntrNoMem]>, RISCVVIntrinsic {
407    let VLOperand = 3;
408  }
409  // For destination vector type is the same as first and second source vector.
410  // Input: (vector_in, vector_in, int16_vector_in, vl, policy)
411  class RISCVRGatherEI16VVMasked
412        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
413                    [LLVMMatchType<0>, LLVMMatchType<0>,
414                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
415                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
416                     LLVMMatchType<1>],
417                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
418    let VLOperand = 4;
419  }
420  // For destination vector type is the same as first source vector, and the
421  // second operand is XLen.
422  // Input: (passthru, vector_in, xlen_in, vl)
423  class RISCVGatherVXUnMasked
424        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
425                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
426                     LLVMMatchType<1>],
427                    [IntrNoMem]>, RISCVVIntrinsic {
428    let VLOperand = 3;
429  }
430  // For destination vector type is the same as first source vector (with mask).
431  // Second operand is XLen.
432  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
433  class RISCVGatherVXMasked
434       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
435                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
436                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
437                    LLVMMatchType<1>],
438                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
439    let VLOperand = 4;
440  }
441  // For destination vector type is the same as first source vector.
442  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
443  class RISCVBinaryAAXUnMasked<bit IsVI = 0>
444        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
445                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
446                     llvm_anyint_ty],
447                    !listconcat([IntrNoMem],
448                                !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
449                    RISCVVIntrinsic {
450    let ScalarOperand = 2;
451    let VLOperand = 3;
452  }
453  // For destination vector type is the same as the source vector type.
454  // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy)
455  class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0>
456        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
457                                [LLVMMatchType<0>, LLVMMatchType<0>,
458                                 llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>],
459                                !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem],
460                                            !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
461                                RISCVVIntrinsic {
462    let ScalarOperand = 2;
463    let VLOperand = 3;
464  }
465  // For destination vector type is the same as first source vector (with mask).
466  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
467  class RISCVBinaryAAXMasked
468       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
469                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
470                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
471                    LLVMMatchType<2>],
472                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
473    let ScalarOperand = 2;
474    let VLOperand = 4;
475  }
476  // For destination vector type is the same as first source vector.
477  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
478  class RISCVBinaryAAXUnMaskedRoundingMode
479        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
480                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
481                     llvm_anyint_ty, LLVMMatchType<2>],
482                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
483    let ScalarOperand = 2;
484    let VLOperand = 4;
485  }
486  // For destination vector type is the same as first source vector (with mask).
487  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
488  class RISCVBinaryAAXMaskedRoundingMode
489       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
490                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
491                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
492                    LLVMMatchType<2>, LLVMMatchType<2>],
493                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
494    let ScalarOperand = 2;
495    let VLOperand = 5;
496  }
497  // For destination vector type is the same as first source vector. The
498  // second source operand must match the destination type or be an XLen scalar.
499  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
500  class RISCVBinaryAAShiftUnMasked
501        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
502                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
503                     llvm_anyint_ty],
504                    [IntrNoMem]>, RISCVVIntrinsic {
505    let VLOperand = 3;
506  }
507  // For destination vector type is the same as first source vector (with mask).
508  // The second source operand must match the destination type or be an XLen scalar.
509  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
510  class RISCVBinaryAAShiftMasked
511       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
512                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
513                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
514                    LLVMMatchType<2>],
515                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
516    let VLOperand = 4;
517  }
518  // For destination vector type is NOT the same as first source vector.
519  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
520  class RISCVBinaryABXUnMasked
521        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
522                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
523                     llvm_anyint_ty],
524                    [IntrNoMem]>, RISCVVIntrinsic {
525    let ScalarOperand = 2;
526    let VLOperand = 3;
527  }
528  // For destination vector type is NOT the same as first source vector (with mask).
529  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
530  class RISCVBinaryABXMasked
531        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
532                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
533                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
534                     LLVMMatchType<3>],
535                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
536    let ScalarOperand = 2;
537    let VLOperand = 4;
538  }
539  // For destination vector type is NOT the same as first source vector.
540  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
541  class RISCVBinaryABXUnMaskedRoundingMode
542        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
543                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
544                     llvm_anyint_ty, LLVMMatchType<3>],
545                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
546    let ScalarOperand = 2;
547    let VLOperand = 4;
548  }
549  // For destination vector type is NOT the same as first source vector (with mask).
550  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
551  class RISCVBinaryABXMaskedRoundingMode
552        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
553                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
554                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
555                     LLVMMatchType<3>, LLVMMatchType<3>],
556                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
557    let ScalarOperand = 2;
558    let VLOperand = 5;
559  }
560  // For destination vector type is NOT the same as first source vector. The
561  // second source operand must match the destination type or be an XLen scalar.
562  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
563  class RISCVBinaryABShiftUnMasked
564        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
565                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
566                     llvm_anyint_ty],
567                    [IntrNoMem]>, RISCVVIntrinsic {
568    let VLOperand = 3;
569  }
570  // For destination vector type is NOT the same as first source vector (with mask).
571  // The second source operand must match the destination type or be an XLen scalar.
572  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
573  class RISCVBinaryABShiftMasked
574        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
575                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
576                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
577                     LLVMMatchType<3>],
578                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
579    let VLOperand = 4;
580  }
581  // For binary operations with V0 as input.
582  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
583  class RISCVBinaryWithV0
584        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
585                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
586                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
587                     llvm_anyint_ty],
588                    [IntrNoMem]>, RISCVVIntrinsic {
589    let ScalarOperand = 2;
590    let VLOperand = 4;
591  }
592  // For binary operations with mask type output and V0 as input.
593  // Output: (mask type output)
594  // Input: (vector_in, vector_in/scalar_in, V0, vl)
595  class RISCVBinaryMOutWithV0
596        :DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
597                   [llvm_anyvector_ty, llvm_any_ty,
598                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
599                    llvm_anyint_ty],
600                   [IntrNoMem]>, RISCVVIntrinsic {
601    let ScalarOperand = 1;
602    let VLOperand = 3;
603  }
604  // For binary operations with mask type output.
605  // Output: (mask type output)
606  // Input: (vector_in, vector_in/scalar_in, vl)
607  class RISCVBinaryMOut
608        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
609                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
610                    [IntrNoMem]>, RISCVVIntrinsic {
611    let ScalarOperand = 1;
612    let VLOperand = 2;
613  }
614  // For binary operations with mask type output without mask.
615  // Output: (mask type output)
616  // Input: (vector_in, vector_in/scalar_in, vl)
617  class RISCVCompareUnMasked
618        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
619                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
620                    [IntrNoMem]>, RISCVVIntrinsic {
621    let ScalarOperand = 1;
622    let VLOperand = 2;
623  }
624  // For binary operations with mask type output with mask.
625  // Output: (mask type output)
626  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
627  class RISCVCompareMasked
628        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
629                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
630                     llvm_anyvector_ty, llvm_any_ty,
631                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
632                    [IntrNoMem]>, RISCVVIntrinsic {
633    let ScalarOperand = 2;
634    let VLOperand = 4;
635  }
636  // For FP classify operations.
637  // Output: (bit mask type output)
638  // Input: (passthru, vector_in, vl)
639  class RISCVClassifyUnMasked
640        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
641                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
642                      llvm_anyint_ty],
643                    [IntrNoMem]>, RISCVVIntrinsic {
644    let VLOperand = 1;
645  }
646  // For FP classify operations with mask.
647  // Output: (bit mask type output)
648  // Input: (maskedoff, vector_in, mask, vl, policy)
649  class RISCVClassifyMasked
650        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
651                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
652                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
653                     llvm_anyint_ty, LLVMMatchType<1>],
654                    [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
655    let VLOperand = 3;
656  }
  // For Saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For Saturating binary operations with rounding-mode operand
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  // The vxrm operand (index 3) must be an immediate.
  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  // The policy operand (index 5) must be an immediate.
  class RISCVSaturatingBinaryAAXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask and rounding-mode operand
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  // The vxrm (index 4) and policy (index 6) operands must be immediates.
  class RISCVSaturatingBinaryAAXMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  // No ScalarOperand: the shift amount is either a vector or an XLen scalar,
  // never a splatted element-type scalar.
  class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  // The vxrm (index 4) and policy (index 6) operands must be immediates.
  class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>,ImmArg<ArgIndex<6>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  // The vxrm operand (index 3) must be an immediate.
  class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  // The vxrm (index 4) and policy (index 6) operands must be immediates.
  class RISCVSaturatingBinaryABShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For vector slide operations.
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  // scalar_in is the XLen slide offset; policy (index 4) must be an immediate.
  class RVVSlideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For vector slide operations with mask.
  // Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
  // scalar_in is the XLen slide offset (an integer scalar, as in the unmasked
  // variant); policy (index 5) must be an immediate.
  class RVVSlideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // UnMasked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  // The policy operand (index 4) must be an immediate.
  class RISCVTernaryAAXAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  // The policy operand (index 5) must be an immediate.
  class RISCVTernaryAAXAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // UnMasked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  // The frm (index 3) and policy (index 5) operands must be immediates.
  class RISCVTernaryAAXAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  // The frm (index 4) and policy (index 6) operands must be immediates.
  class RISCVTernaryAAXAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  // The policy operand (index 4) must be an immediate.
  class RISCVTernaryWideUnMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, IntrNoMem] >, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  // The policy operand (index 5) must be an immediate.
  class RISCVTernaryWideMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  // The frm (index 3) and policy (index 5) operands must be immediates.
  class RISCVTernaryWideUnMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem] >,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  // The frm (index 4) and policy (index 6) operands must be immediates.
  class RISCVTernaryWideMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, frm, vl)
  // The frm operand (index 3) must be an immediate.
  class RISCVReductionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (vector_in, vector_in, vector_in, mask, frm, vl)
  // The frm operand (index 4) must be an immediate.
  class RISCVReductionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  // The scalar result type matches the type of the vl operand.
  class RISCVMaskedUnarySOutUnMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  // The mask has the same (i1 vector) type as the input vector.
  class RISCVMaskedUnarySOutMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unary operations where the destination vector type is NOT the same as
  // the source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryABUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unary operations where the destination vector type is NOT the same as
  // the source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  // The policy operand (index 4) must be an immediate.
  class RISCVUnaryABMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  // llvm_anyint_ty is used for the mask vector type here; maskedoff, input and
  // mask all share that type.
  class RISCVMaskedUnaryMOutMasked
        : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
955  // Output: (vector)
956  // Input: (vl)
957  class RISCVNullaryIntrinsic
958        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
959                    [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
960    let VLOperand = 1;
961  }
  // For the vid (vector element index) operation.
  // Output: (vector)
  // Input: (passthru, vl)
  class RISCVID
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, vl)
  class RISCVConversionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  // The policy operand (index 4) must be an immediate.
  class RISCVConversionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, frm, vl)
  // The frm operand (index 2) must be an immediate.
  class RISCVConversionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, frm, vl, policy)
  // The frm (index 3) and policy (index 5) operands must be immediates.
  class RISCVConversionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
1007
  // For unit stride segment load
  // Input: (passthru, pointer, vl)
  // Output is nf vectors; the nf passthru operands precede the pointer (at
  // index nf) and vl (at index nf+1).
  class RISCVUSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Operand indices: pointer = nf, mask = nf+1, vl = nf+2, policy = nf+3
  // (policy must be an immediate).
  class RISCVUSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>,
                     IntrReadMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
1033
  // For unit stride fault-only-first segment load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFF<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // Operand indices: pointer = nf, mask = nf+1, vl = nf+2, policy = nf+3
  // (policy must be an immediate).
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFFMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                     [llvm_ptr_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
1063
  // For stride segment load
  // Input: (passthru, pointer, offset, vl)
  // Operand indices: pointer = nf, offset = nf+1, vl = nf+2.
  class RISCVSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyint_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl, policy)
  // Operand indices: pointer = nf, offset = nf+1, mask = nf+2, vl = nf+3,
  // policy = nf+4 (policy must be an immediate).
  class RISCVSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1088
  // For indexed segment load
  // Input: (passthru, pointer, index, vl)
  // Operand indices: pointer = nf, index vector = nf+1, vl = nf+2.
  class RISCVISegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyvector_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  // Operand indices: pointer = nf, index vector = nf+1, mask = nf+2,
  // vl = nf+3, policy = nf+4 (policy must be an immediate).
  class RISCVISegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<2>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1113
  // For unit stride segment store
  // Input: (value, pointer, vl)
  // The nf value vectors come first; pointer = nf, vl = nf+1.
  class RISCVUSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  // Operand indices: pointer = nf, mask = nf+1, vl = nf+2.
  class RISCVUSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
1138
  // For stride segment store
  // Input: (value, pointer, offset, vl)
  // Operand indices: pointer = nf, offset = nf+1, vl = nf+2.
  class RISCVSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  // Operand indices: pointer = nf, offset = nf+1, mask = nf+2, vl = nf+3.
  class RISCVSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1162
  // For indexed segment store
  // Input: (value, pointer, index, vl)
  // Operand indices: pointer = nf, index vector = nf+1, vl = nf+2.
  class RISCVISegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  // Operand indices: pointer = nf, index vector = nf+1, mask = nf+2,
  // vl = nf+3.
  class RISCVISegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1186
  // Each multiclass below instantiates the plain intrinsic plus its "_mask"
  // counterpart from the corresponding UnMasked/Masked class pair above.
  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
  }
  multiclass RISCVUnaryAARoundingMode {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
  }
  // AAX means the destination type(A) is the same as the first source
  // type(A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
  }
  multiclass RISCVBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  }
  // ABX means the destination type(A) is different from the first source
  // type(B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  }
  multiclass RISCVBinaryABXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
  }
  // The carry-in variants below have no separate masked form.
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  multiclass RISCVSaturatingBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryABShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
  }
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
1301  multiclass RISCVTernaryAAXA {
1302    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
1303    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
1304  }
1305  multiclass RISCVTernaryAAXARoundingMode {
1306    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode;
1307    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode;
1308  }
  // Comparisons producing a mask result (vmseq/vmflt and friends below).
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  // FP classification (used by vfclass below).
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  // Widening ternary multiply-accumulate (vwmacc* below).
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  }
  // As above, plus a rounding-mode operand (vfwmacc* below).
  multiclass RISCVTernaryWideRoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode;
  }
  // Reductions (vredsum and friends below).
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  }
  // Reductions carrying a rounding-mode operand (FP sum reductions below).
  multiclass RISCVReductionRoundingMode {
    def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode;
  }
  // Mask-input unary op with a scalar result (vcpop/vfirst below).
  multiclass RISCVMaskedUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  }
  // Mask-input unary op with a mask result (vmsbf/vmsof/vmsif below).
  // Note: the unmasked variant reuses the generic RISCVUnaryUnMasked class.
  multiclass RISCVMaskedUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  }
1341  multiclass RISCVConversion {
1342    def "int_riscv_" #NAME :RISCVConversionUnMasked;
1343    def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
1344  }
1345  multiclass RISCVConversionRoundingMode {
1346    def "int_riscv_" #NAME :RISCVConversionUnMaskedRoundingMode;
1347    def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode;
1348  }
  // Segment load/store multiclasses, parameterized by the field count <nf>.
  // Unit-stride segment load.
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  }
  // Unit-stride fault-only-first segment load.
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  }
  // Strided segment load.
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  }
  // Indexed segment load.
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  }
  // Unit-stride segment store.
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  }
  // Strided segment store.
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  }
  // Indexed segment store.
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  }
1377
1378  defm vle : RISCVUSLoad;
1379  defm vleff : RISCVUSLoadFF;
1380  defm vse : RISCVUSStore;
1381  defm vlse: RISCVSLoad;
1382  defm vsse: RISCVSStore;
1383  defm vluxei : RISCVILoad;
1384  defm vloxei : RISCVILoad;
1385  defm vsoxei : RISCVIStore;
1386  defm vsuxei : RISCVIStore;
1387
1388  def int_riscv_vlm : RISCVUSMLoad;
1389  def int_riscv_vsm : RISCVUSStore;
1390
  // Single-width integer add/subtract (vrsub = reversed-operand subtract).
  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  // Widening integer add/subtract; the _w forms take an already-wide first
  // source (hence the AAX operand shape instead of ABX).
  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;
1403
  // Integer zero-/sign-extension.
  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  // Add-with-carry: vadc takes v0 as carry-in; vmadc produces the carry-out
  // mask (with or without a carry-in).
  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  // Subtract-with-borrow, mirroring the carry ops above.
  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;
1414
  // Bitwise logical ops.
  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  // Single-width shifts (second operand is a shift amount).
  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  // Narrowing shifts (wide source, narrow result).
  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;
1425
  // Integer comparisons; each produces a mask-typed result.
  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;
1436
  // Integer min/max (unsigned and signed).
  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  // Integer multiply, including high-half variants.
  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  // Integer divide/remainder.
  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  // Widening integer multiply.
  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  // Single-width integer multiply-accumulate.
  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  // Widening integer multiply-accumulate.
  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;
1465
  // FP add/subtract; all carry an explicit rounding-mode operand.
  defm vfadd : RISCVBinaryAAXRoundingMode;
  defm vfsub : RISCVBinaryAAXRoundingMode;
  defm vfrsub : RISCVBinaryAAXRoundingMode;

  // Widening FP add/subtract (the _w forms take a wide first source).
  defm vfwadd : RISCVBinaryABXRoundingMode;
  defm vfwsub : RISCVBinaryABXRoundingMode;
  defm vfwadd_w : RISCVBinaryAAXRoundingMode;
  defm vfwsub_w : RISCVBinaryAAXRoundingMode;

  // Saturating integer add/subtract.
  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  // Merge selects element-wise between the two sources using v0.
  defm vmerge : RISCVBinaryWithV0;
1481
  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMMatchType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  // Output: (scalar element)
  // Input: (vector)
  def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                [llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  // Output: (scalar element)
  // Input: (vector)
  def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                 [llvm_anyfloat_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
1531
  // FP multiply/divide (vfrdiv = reversed-operand divide).
  defm vfmul : RISCVBinaryAAXRoundingMode;
  defm vfdiv : RISCVBinaryAAXRoundingMode;
  defm vfrdiv : RISCVBinaryAAXRoundingMode;

  // Widening FP multiply.
  defm vfwmul : RISCVBinaryABXRoundingMode;

  // Single-width FP fused multiply-add family.
  defm vfmacc : RISCVTernaryAAXARoundingMode;
  defm vfnmacc : RISCVTernaryAAXARoundingMode;
  defm vfmsac : RISCVTernaryAAXARoundingMode;
  defm vfnmsac : RISCVTernaryAAXARoundingMode;
  defm vfmadd : RISCVTernaryAAXARoundingMode;
  defm vfnmadd : RISCVTernaryAAXARoundingMode;
  defm vfmsub : RISCVTernaryAAXARoundingMode;
  defm vfnmsub : RISCVTernaryAAXARoundingMode;

  // Widening FP fused multiply-add (including the bf16 variant).
  defm vfwmacc : RISCVTernaryWideRoundingMode;
  defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
  defm vfwnmacc : RISCVTernaryWideRoundingMode;
  defm vfwmsac : RISCVTernaryWideRoundingMode;
  defm vfwnmsac : RISCVTernaryWideRoundingMode;

  // FP square root and the 7-bit reciprocal(-sqrt) estimates.
  defm vfsqrt : RISCVUnaryAARoundingMode;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAARoundingMode;

  // FP min/max.
  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  // FP sign-injection.
  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  // FP classification.
  defm vfclass : RISCVClassify;
1565
1566  defm vfmerge : RISCVBinaryWithV0;
1567
1568  defm vslideup : RVVSlide;
1569  defm vslidedown : RVVSlide;
1570
1571  defm vslide1up : RISCVBinaryAAX;
1572  defm vslide1down : RISCVBinaryAAX;
1573  defm vfslide1up : RISCVBinaryAAX;
1574  defm vfslide1down : RISCVBinaryAAX;
1575
1576  defm vrgather_vv : RISCVRGatherVV;
1577  defm vrgather_vx : RISCVRGatherVX;
1578  defm vrgatherei16_vv : RISCVRGatherEI16VV;
1579
1580  def "int_riscv_vcompress" : RISCVCompress;
1581
  // Averaging add/subtract (fixed-point; take a rounding-mode operand).
  defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasub : RISCVSaturatingBinaryAAXRoundingMode;

  // Fractional saturating multiply.
  defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;

  // Scaling shifts.
  defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
  defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;

  // Narrowing fixed-point clips.
  defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
  defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;

  // FP comparisons; each produces a mask-typed result.
  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;
1601
  // Single-width integer reductions.
  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  // Widening integer sum reductions.
  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  // FP reductions; the ordered/unordered sums carry a rounding-mode operand.
  defm vfredosum : RISCVReductionRoundingMode;
  defm vfredusum : RISCVReductionRoundingMode;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  // Widening FP sum reductions.
  defm vfwredusum : RISCVReductionRoundingMode;
  defm vfwredosum : RISCVReductionRoundingMode;
1621
1622  def int_riscv_vmand: RISCVBinaryAAAUnMasked;
1623  def int_riscv_vmnand: RISCVBinaryAAAUnMasked;
1624  def int_riscv_vmandn: RISCVBinaryAAAUnMasked;
1625  def int_riscv_vmxor: RISCVBinaryAAAUnMasked;
1626  def int_riscv_vmor: RISCVBinaryAAAUnMasked;
1627  def int_riscv_vmnor: RISCVBinaryAAAUnMasked;
1628  def int_riscv_vmorn: RISCVBinaryAAAUnMasked;
1629  def int_riscv_vmxnor: RISCVBinaryAAAUnMasked;
1630  def int_riscv_vmclr : RISCVNullaryIntrinsic;
1631  def int_riscv_vmset : RISCVNullaryIntrinsic;
1632
1633  defm vcpop : RISCVMaskedUnarySOut;
1634  defm vfirst : RISCVMaskedUnarySOut;
1635  defm vmsbf : RISCVMaskedUnaryMOut;
1636  defm vmsof : RISCVMaskedUnaryMOut;
1637  defm vmsif : RISCVMaskedUnaryMOut;
1638
  // Single-width FP<->integer conversions. The _rtz_ variants have a fixed
  // rounding behavior and therefore take no rounding-mode operand.
  defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
  defm vfcvt_f_x_v : RISCVConversionRoundingMode;

  // Widening conversions (including FP->FP and the bf16 widening form).
  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;
  defm vfwcvtbf16_f_f_v : RISCVConversion;

  // Narrowing conversions (including FP->FP, the bf16 narrowing form, and
  // the fixed-behavior _rod_ variant, which takes no rounding-mode operand).
  defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
  defm vfncvt_f_x_w : RISCVConversionRoundingMode;
  defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
  defm vfncvt_x_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversionRoundingMode;
  defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rod_f_f_w : RISCVConversion;
1664
  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty],
                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  // The trailing policy operand must be a constant (ImmArg on operand 4).
  def int_riscv_viota_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  // The trailing policy operand must be a constant (ImmArg on operand 3).
  def int_riscv_vid_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
1700
  // Instantiate every scalable-vector segment load/store intrinsic for each
  // supported field count (2..8 fields per segment).
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
1712
  // Strided loads/stores for fixed vectors.
  // Load operands: (passthru, pointer, stride, mask) -> vector.
  // Store operands: (data, pointer, stride, mask).
  // NOTE(review): the stride's units (bytes vs. elements) are not visible
  // here — confirm against the lowering code before relying on either.
  def int_riscv_masked_strided_load
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  def int_riscv_masked_strided_store
        : DefaultAttrsIntrinsic<[],
                                [llvm_anyvector_ty, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
1726
  // Segment loads/stores for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    // seg<nf>_load: (pointer, int) -> nf vectors of the same type. The
    // pointer (operand 0) is not captured.
    def int_riscv_seg # nf # _load
          : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                              !add(nf, -1))),
                                  [llvm_anyptr_ty, llvm_anyint_ty],
                                  [NoCapture<ArgIndex<0>>, IntrReadMem]>;
    // seg<nf>_store: (nf vectors, pointer, int); operand nf is the pointer,
    // which is why NoCapture uses ArgIndex<nf>.
    def int_riscv_seg # nf # _store
          : DefaultAttrsIntrinsic<[],
                                  !listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                                          !add(nf, -1)),
                                              [llvm_anyptr_ty, llvm_anyint_ty]),
                                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
  }
1743
1744} // TargetPrefix = "riscv"
1745
1746//===----------------------------------------------------------------------===//
1747// Scalar Cryptography
1748//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.
1751
1752let TargetPrefix = "riscv" in {
1753
// (i32, i32, bs) -> i32, where bs is a byte-select that must be a constant
// (ImmArg on operand 2). Pure and speculatable.
class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;

// (i32, i32) -> i32, pure and speculatable.
class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// (i64, i64) -> i64, pure and speculatable.
class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// (i32) -> i32, pure and speculatable.
class ScalarCryptoGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// (i64) -> i64, pure and speculatable.
class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;
1779
// Zknd (AES decryption)
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsi">;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsmi">;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ds">;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64dsm">;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64im">;

// Zkne (AES encryption)
def int_riscv_aes32esi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esi">;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esmi">;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64es">;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64esm">;

// Zknd & Zkne (AES key schedule)
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ks2">;
// The round-number operand (operand 1) must be a constant (ImmArg).
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>,
                          ClangBuiltin<"__builtin_riscv_aes64ks1i">;

// Zknh (SHA-2 hash)
// NOTE(review): unlike the AES intrinsics above, no ClangBuiltin is attached
// to the sha256 group — presumably the builtins are wired up elsewhere;
// confirm on the Clang side.
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0l">;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0h">;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1l">;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1h">;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum0r">;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum1r">;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig0">;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig1">;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum0">;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum1">;

// Zksed (SM4 block cipher)
def int_riscv_sm4ks      : ScalarCryptoByteSelect32;
def int_riscv_sm4ed      : ScalarCryptoByteSelect32;

// Zksh (SM3 hash)
def int_riscv_sm3p0      : ScalarCryptoGprIntrinsic32;
def int_riscv_sm3p1      : ScalarCryptoGprIntrinsic32;
1849} // TargetPrefix = "riscv"
1850
1851//===----------------------------------------------------------------------===//
1852// Vector Cryptography
1853//
1854// These intrinsics will lower directly into the corresponding instructions
1855// added by the vector cyptography extension, if the extension is present.
1856let TargetPrefix = "riscv" in {
  // The IsVI / IsVS / HasVV template arguments below control which operand
  // forms (vector-immediate / vector-scalar / vector-vector) the underlying
  // Zvk classes emit — see the class definitions for details.

  // Zvkb (vector crypto bit-manipulation)
  defm vandn             : RISCVBinaryAAX;
  defm vbrev8            : RISCVUnaryAA;
  defm vrev8             : RISCVUnaryAA;
  defm vrol              : RISCVBinaryAAX;
  defm vror              : RISCVBinaryAAX;

  // Zvbb (superset of Zvkb: bit reverse, count leading/trailing zeros,
  // popcount, widening shift-left)
  defm vbrev             : RISCVUnaryAA;
  defm vclz              : RISCVUnaryAA;
  defm vctz              : RISCVUnaryAA;
  defm vcpopv            : RISCVUnaryAA;
  defm vwsll             : RISCVBinaryABX;

  // Zvbc (vector carryless multiply)
  defm vclmul            : RISCVBinaryAAX;
  defm vclmulh           : RISCVBinaryAAX;

  // Zvkg (vector GCM/GHASH)
  def int_riscv_vghsh    : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

  // Zvkned (vector AES)
  defm vaesdf            : RISCVUnaryAAUnMaskedZvk;
  defm vaesdm            : RISCVUnaryAAUnMaskedZvk;
  defm vaesef            : RISCVUnaryAAUnMaskedZvk;
  defm vaesem            : RISCVUnaryAAUnMaskedZvk;
  def int_riscv_vaeskf1  : RISCVBinaryAAXUnMasked<IsVI=1>;
  def int_riscv_vaeskf2  : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  defm vaesz             : RISCVUnaryAAUnMaskedZvk<HasVV=0>;

  // Zvknha or Zvknhb (vector SHA-2)
  def int_riscv_vsha2ch  : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2cl  : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2ms  : RISCVBinaryAAXUnMaskedZvk;

  // Zvksed (vector SM4)
  def int_riscv_vsm4k    : RISCVBinaryAAXUnMasked<IsVI=1>;
  defm vsm4r             : RISCVUnaryAAUnMaskedZvk;

  // Zvksh (vector SM3)
  def int_riscv_vsm3c    : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  def int_riscv_vsm3me   : RISCVBinaryAAXUnMasked;
1900} // TargetPrefix = "riscv"
1901
//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//
1904include "llvm/IR/IntrinsicsRISCVXTHead.td"
1905include "llvm/IR/IntrinsicsRISCVXsf.td"
1906include "llvm/IR/IntrinsicsRISCVXCV.td"
1907