//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure to support code generation
/// for the standard 'V' (Vector) extension, version 1.0.
///
/// This file is included from RISCVInstrInfoV.td
///
//===----------------------------------------------------------------------===//

def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;

// Operand that is allowed to be a register or a 5-bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
def AVL : RegisterOperand<GPRNoX0> {
  let OperandNamespace = "RISCVOp";
  let OperandType = "OPERAND_AVL";
}

// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;

def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;
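
// One expected use of DecImm: patterns can select a strict compare such as
// "vmslt.vi vd, vs2, imm" as "vmsle.vi vd, vs2, imm-1", with DecImm
// producing the decremented immediate operand.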

defvar TAIL_UNDISTURBED_MASK_UNDISTURBED = 0;
defvar TAIL_AGNOSTIC = 1;

//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//

class PseudoToVInst<string PseudoInst> {
  string VInst = !subst("_M8", "",
                 !subst("_M4", "",
                 !subst("_M2", "",
                 !subst("_M1", "",
                 !subst("_MF2", "",
                 !subst("_MF4", "",
                 !subst("_MF8", "",
                 !subst("_B1", "",
                 !subst("_B2", "",
                 !subst("_B4", "",
                 !subst("_B8", "",
                 !subst("_B16", "",
                 !subst("_B32", "",
                 !subst("_B64", "",
                 !subst("_MASK", "",
                 !subst("_TIED", "",
                 !subst("_TU", "",
                 !subst("F16", "F",
                 !subst("F32", "F",
                 !subst("F64", "F",
                 !subst("Pseudo", "", PseudoInst)))))))))))))))))))));
}
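
// For example, PseudoToVInst<"PseudoVADD_VV_M1_MASK">.VInst is "VADD_VV":
// the chain above strips the "Pseudo" prefix and the "_M1" and "_MASK"
// suffixes to recover the underlying instruction name.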

// This class describes information associated with the LMUL.
class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;
  VReg wvrclass = wregclass;
  VReg f8vrclass = f8regclass;
  VReg f4vrclass = f4regclass;
  VReg f2vrclass = f2regclass;
  string MX = mx;
  int octuple = oct;
}
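
// "octuple" is LMUL scaled by 8 so that fractional LMULs can be compared as
// plain integers: e.g. V_MF2 below has octuple 4 (8 * 1/2) and V_M2 has
// octuple 16.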

// Associate LMUL with tablegen records of register classes.
def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;

// Used to iterate over all possible LMULs.
defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
// For floating point, which doesn't need MF8.
defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for widening and narrowing instructions as it doesn't contain M8.
defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
// For floating point, which doesn't need MF8.
defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];

// Used for zext/sext.vf2.
defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for zext/sext.vf4.
defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for zext/sext.vf8.
defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];

class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}
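
// MxSet drops the fractional LMULs that are illegal for a given EEW:
// assuming ELEN=64, a fractional LMUL requires EEW <= LMUL * 64, so
// MxSet<32>.m starts at V_MF2 (32 <= 64/2) and omits MF4 and MF8.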

class FPR_Info<RegisterClass regclass, string fx, list<LMULInfo> mxlist> {
  RegisterClass fprclass = regclass;
  string FX = fx;
  list<LMULInfo> MxList = mxlist;
}

def SCALAR_F16 : FPR_Info<FPR16, "F16", MxSet<16>.m>;
def SCALAR_F32 : FPR_Info<FPR32, "F32", MxSet<32>.m>;
def SCALAR_F64 : FPR_Info<FPR64, "F64", MxSet<64>.m>;

defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];

// Used for widening instructions. It excludes F64.
defvar FPListW = [SCALAR_F16, SCALAR_F32];

class NFSet<LMULInfo m> {
  list<int> L = !cond(!eq(m.value, V_M8.value): [],
                      !eq(m.value, V_M4.value): [2],
                      !eq(m.value, V_M2.value): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}
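
// Segment instructions require NF * LMUL <= 8 vector registers, so e.g.
// NFSet<V_M4>.L is [2], NFSet<V_M8>.L is empty, and every NF in 2..8 is
// legal when LMUL <= 1.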

class log2<int num> {
  int val = !if(!eq(num, 1), 0, !add(1, log2<!srl(num, 1)>.val));
}
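
// Recursive base-2 logarithm of a power of two, e.g. log2<16>.val is 4
// (16 -> 8 -> 4 -> 2 -> 1, adding one per halving).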

class octuple_to_str<int octuple> {
  string ret = !if(!eq(octuple, 1), "MF8",
                   !if(!eq(octuple, 2), "MF4",
                   !if(!eq(octuple, 4), "MF2",
                   !if(!eq(octuple, 8), "M1",
                   !if(!eq(octuple, 16), "M2",
                   !if(!eq(octuple, 32), "M4",
                   !if(!eq(octuple, 64), "M8",
                   "NoDef")))))));
}
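
// The inverse mapping of LMULInfo.octuple, e.g. octuple_to_str<4>.ret is "MF2".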

def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;

// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
// We can't use the X0 register because the AVL operands use GPRNoX0.
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;

// List of EEW.
defvar EEWList = [8, 16, 32, 64];

class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}
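
// Fractional LMULs use the M1 tuple register classes, e.g.
// SegRegClass<V_MF4, 2>.RC is VRN2M1, while SegRegClass<V_M2, 3>.RC is VRN3M2.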

//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//

class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
{
  ValueType Vector = Vec;
  ValueType Mask = Mas;
  int SEW = Sew;
  int Log2SEW = log2<Sew>.val;
  VReg RegClass = Reg;
  LMULInfo LMul = M;
  ValueType Scalar = Scal;
  RegisterClass ScalarRegClass = ScalarReg;
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "F16",
                              !eq(Scal, f32) : "F32",
                              !eq(Scal, f64) : "F64");
}

class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
{
  ValueType VectorM1 = VecM1;
}

defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
        def VI8MF8: VTypeInfo<vint8mf8_t,  vbool64_t,  8, VR, V_MF8>;
        def VI8MF4: VTypeInfo<vint8mf4_t,  vbool32_t,  8, VR, V_MF4>;
        def VI8MF2: VTypeInfo<vint8mf2_t,  vbool16_t,  8, VR, V_MF2>;
        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
      }
      def VI8M1: VTypeInfo<vint8m1_t,   vbool8_t,   8, VR, V_M1>;
      def VI16M1: VTypeInfo<vint16m1_t,  vbool16_t, 16, VR, V_M1>;
      def VI32M1: VTypeInfo<vint32m1_t,  vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t,  vbool64_t, 64, VR, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      defset list<VTypeInfo> FractionalGroupFloatVectors = {
        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
        def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      }
      def VF16M1:  VTypeInfo<vfloat16m1_t,  vbool16_t, 16, VR, V_M1,  f16, FPR16>;
      def VF32M1:  VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1,  f32, FPR32>;
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                 VRM2, V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                 VRM4, V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                 VRM8, V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                 VRM2, V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
                                 VRM4, V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
                                 VRM8, V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                 VRM2, V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                 VRM4, V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
                                 VRM8, V_M8, f64, FPR64>;
    }
  }
}

// This functor is used to obtain the int vector type that has the same SEW
// and multiplier as the input parameter type.
class GetIntVTypeInfo<VTypeInfo vti>
{
  // Equivalent integer vector type. E.g.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}

class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // We assume SEW=1 and set the corresponding LMUL; vsetvli insertion will
  // look for SEW=1 to optimize based on surrounding instructions.
  int SEW = 1;
  int Log2SEW = 0;
  LMULInfo LMul = M;
  string BX = Bx; // Suffix for mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}

defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, <n> = SEW/LMUL; we assume SEW=8 and the corresponding LMUL.
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}

class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}

defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}

defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}

defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}

defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}

defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}

defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}

// This class holds the records of the RISCVVPseudosTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVBaseInfo.h.
class CONST8b<bits<8> val> {
  bits<8> V = val;
}
def InvalidIndex : CONST8b<0x80>;
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// The actual table.
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
  let PrimaryKeyEarlyOut = true;
}

def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}

class RISCVMaskedPseudo<bits<4> MaskIdx, bit HasTU = true> {
  Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
  Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
  Pseudo UnmaskedTUPseudo = !if(HasTU, !cast<Pseudo>(!subst("_MASK", "", NAME # "_TU")), MaskedPseudo);
  bits<4> MaskOpIdx = MaskIdx;
}
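
// For example, for "PseudoVADD_VV_M1_MASK" this records the unmasked
// "PseudoVADD_VV_M1" and (when HasTU) the tail-undisturbed
// "PseudoVADD_VV_M1_TU" variants, letting codegen map a masked pseudo to its
// unmasked equivalents.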

def RISCVMaskedPseudosTable : GenericTable {
  let FilterClass = "RISCVMaskedPseudo";
  let CppTypeName = "RISCVMaskedPseudoInfo";
  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "UnmaskedTUPseudo", "MaskOpIdx"];
  let PrimaryKey = ["MaskedPseudo"];
  let PrimaryKeyName = "getMaskedPseudoInfo";
}

class RISCVVLE<bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLETable : GenericTable {
  let FilterClass = "RISCVVLE";
  let CppTypeName = "VLEPseudo";
  let Fields = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLEPseudo";
}

class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSETable : GenericTable {
  let FilterClass = "RISCVVSE";
  let CppTypeName = "VSEPseudo";
  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSEPseudo";
}

class RISCVVLX_VSX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

class RISCVVLX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, TU, O, S, L, IL>;
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, /*TU*/0, O, S, L, IL>;

class RISCVVLX_VSXTable : GenericTable {
  let CppTypeName = "VLX_VSXPseudo";
  let Fields = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}

def RISCVVLXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVLX";
  let PrimaryKeyName = "getVLXPseudo";
}

def RISCVVSXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVSX";
  let PrimaryKeyName = "getVSXPseudo";
}

class RISCVVLSEG<bits<4> N, bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLSEGTable : GenericTable {
  let FilterClass = "RISCVVLSEG";
  let CppTypeName = "VLSEGPseudo";
  let Fields = ["NF", "Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLSEGPseudo";
}

class RISCVVLXSEG<bits<4> N, bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLXSEGTable : GenericTable {
  let FilterClass = "RISCVVLXSEG";
  let CppTypeName = "VLXSEGPseudo";
  let Fields = ["NF", "Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVLXSEGPseudo";
}

class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSSEGTable : GenericTable {
  let FilterClass = "RISCVVSSEG";
  let CppTypeName = "VSSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSSEGPseudo";
}

class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSXSEGTable : GenericTable {
  let FilterClass = "RISCVVSXSEG";
  let CppTypeName = "VSXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVSXSEGPseudo";
}

//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//

// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
class GetVRegNoV0<VReg VRegClass> {
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
                 true : VRegClass);
}
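
// E.g. GetVRegNoV0<VRM2>.R is VRM2NoV0; register classes without a NoV0
// variant fall through unchanged.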

// Join strings in a list using a separator, ignoring empty elements.
class Join<list<string> strings, string separator> {
  string ret = !foldl(!head(strings), !tail(strings), a, b,
                      !cond(
                        !and(!empty(a), !empty(b)) : "",
                        !empty(a) : b,
                        !empty(b) : a,
                        1 : a#separator#b));
}
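
// E.g. Join<["", "$rd = $merge"], ",">.ret is "$rd = $merge", while
// Join<["@earlyclobber $rd", "$rd = $merge"], ",">.ret is
// "@earlyclobber $rd,$rd = $merge".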

class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
}

class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}

class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

class VPseudoUSLoadMask<VReg RetClass, int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoUSLoadFFNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}

class VPseudoUSLoadFFNoMaskTU<VReg RetClass, int EEW> :
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

class VPseudoUSLoadFFMask<VReg RetClass, int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1,
                   VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

class VPseudoSLoadNoMaskTU<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

class VPseudoSLoadMask<VReg RetClass, int EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, GPR:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                         bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl,
              ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, /*TU*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd", "");
}

class VPseudoILoadNoMaskTU<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                           bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPR:$rs1, IdxClass:$rs2, AVL:$vl,
              ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
}

class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                       bit Ordered, bit EarlyClobber>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, IdxClass:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/1, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoUSStoreNoMask<VReg StClass, int EEW, bit DummyMask = 1>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}

class VPseudoUSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}

class VPseudoSStoreNoMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

class VPseudoSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}

// Unary instruction that is never masked, so HasDummyMask=0.
class VPseudoUnaryNoDummyMask<VReg RetClass,
                              DAGOperand Op2Class> :
        Pseudo<(outs RetClass:$rd),
               (ins Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}

class VPseudoUnaryNoDummyMaskTU<VReg RetClass,
                                DAGOperand Op2Class> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$dest, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

class VPseudoNullaryNoMask<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins AVL:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

class VPseudoNullaryNoMaskTU<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins RegClass:$merge, AVL:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
}

class VPseudoNullaryMask<VReg RegClass>:
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
              ixlenimm:$sew, ixlenimm:$policy), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let UsesMaskPolicy = 1;
  let HasVecPolicyOp = 1;
}

// Nullary pseudo instructions; they are expanded by the
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
       : Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
       RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used by the RISCVExpandPseudoInsts pass;
  // just fill in a corresponding real v-inst to pass the tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
}

// RetClass could be GPR or VReg.
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

// RetClass could be GPR or VReg.
class VPseudoUnaryNoMaskTU<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
}

class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

// Mask unary operation without maskedoff.
class VPseudoMaskUnarySOutMask:
        Pseudo<(outs GPR:$rd),
               (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}

// The mask can be any of V0-V31, not just V0.
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2,
                  VR:$vm, AVL:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
}

class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint,
                          int DummyMask = 1> :
        Pseudo<(outs RetClass:$rd),
               (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}

class VPseudoBinaryNoMaskTU<VReg RetClass,
                            VReg Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
}

// Special version of VPseudoBinaryNoMask where we pretend the first source is
// tied to the destination.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryNoMask<VReg RetClass,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew,
                    ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs2"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasVecPolicyOp = 1;
  let isConvertibleToThreeAddress = 1;
}

class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                        bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}

class VPseudoBinaryMask<VReg RetClass,
                        RegisterClass Op1Class,
                        DAGOperand Op2Class,
                        string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
}

class VPseudoBinaryMaskPolicy<VReg RetClass,
                              RegisterClass Op1Class,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

// Like VPseudoBinaryMask, but output can be V0.
class VPseudoBinaryMOutMask<VReg RetClass,
                            RegisterClass Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
                (ins RetClass:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let UsesMaskPolicy = 1;
}
// Special version of VPseudoBinaryMask where we pretend the first source is
// tied to the destination so we can work around the earlyclobber constraint.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryMask<VReg RetClass,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0; // Merge is also rs2.
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoBinaryCarryIn<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           LMULInfo MInfo,
                           bit CarryIn,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0;
  let VLMul = MInfo.value;
}

class VPseudoTiedBinaryCarryIn<VReg RetClass,
                               VReg Op1Class,
                               DAGOperand Op2Class,
                               LMULInfo MInfo,
                               bit CarryIn,
                               string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 0;
  let VLMul = MInfo.value;
}

class VPseudoTernaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
}

class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
                                     string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
}

class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

class VPseudoUSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoUSSegLoadFFNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

class VPseudoUSSegLoadFFNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

class VPseudoUSSegLoadFFMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}

class VPseudoSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

class VPseudoSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $merge";
}
1424
1425class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
1426      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1427             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
1428                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
1429                  ixlenimm:$policy),[]>,
1430      RISCVVPseudo,
1431      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
1432  let mayLoad = 1;
1433  let mayStore = 0;
1434  let hasSideEffects = 0;
1435  let Constraints = "$rd = $merge";
1436  let HasVLOp = 1;
1437  let HasSEWOp = 1;
1438  let HasMergeOp = 1;
1439  let HasVecPolicyOp = 1;
1440  let UsesMaskPolicy = 1;
1441}
1442
1443class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
1444                            bits<4> NF, bit Ordered>:
1445      Pseudo<(outs RetClass:$rd),
1446             (ins GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
1447      RISCVVPseudo,
1448      RISCVVLXSEG<NF, /*Masked*/0, /*TU*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
1449  let mayLoad = 1;
1450  let mayStore = 0;
1451  let hasSideEffects = 0;
1452  // For vector indexed segment loads, the destination vector register groups
1453  // cannot overlap the source vector register group
1454  let Constraints = "@earlyclobber $rd";
1455  let HasVLOp = 1;
1456  let HasSEWOp = 1;
1457  let HasDummyMask = 1;
1458}
1459
1460class VPseudoISegLoadNoMaskTU<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
1461                              bits<4> NF, bit Ordered>:
1462      Pseudo<(outs RetClass:$rd),
1463             (ins RetClass:$merge, GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
1464      RISCVVPseudo,
1465      RISCVVLXSEG<NF, /*Masked*/0, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
1466  let mayLoad = 1;
1467  let mayStore = 0;
1468  let hasSideEffects = 0;
1469  // For vector indexed segment loads, the destination vector register groups
1470  // cannot overlap the source vector register group
1471  let Constraints = "@earlyclobber $rd, $rd = $merge";
1472  let HasVLOp = 1;
1473  let HasSEWOp = 1;
1474  let HasDummyMask = 1;
1475  let HasMergeOp = 1;
1476}
1477
1478class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
1479                          bits<4> NF, bit Ordered>:
1480      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1481             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
1482                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
1483                  ixlenimm:$policy),[]>,
1484      RISCVVPseudo,
1485      RISCVVLXSEG<NF, /*Masked*/1, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
1486  let mayLoad = 1;
1487  let mayStore = 0;
1488  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register group
  // cannot overlap the source vector register group.
1491  let Constraints = "@earlyclobber $rd, $rd = $merge";
1492  let HasVLOp = 1;
1493  let HasSEWOp = 1;
1494  let HasMergeOp = 1;
1495  let HasVecPolicyOp = 1;
1496  let UsesMaskPolicy = 1;
1497}
1498
1499class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
1500      Pseudo<(outs),
1501             (ins ValClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
1502      RISCVVPseudo,
1503      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
1504  let mayLoad = 0;
1505  let mayStore = 1;
1506  let hasSideEffects = 0;
1507  let HasVLOp = 1;
1508  let HasSEWOp = 1;
1509  let HasDummyMask = 1;
1510}
1511
1512class VPseudoUSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
1513      Pseudo<(outs),
1514             (ins ValClass:$rd, GPR:$rs1,
1515                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
1516      RISCVVPseudo,
1517      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
1518  let mayLoad = 0;
1519  let mayStore = 1;
1520  let hasSideEffects = 0;
1521  let HasVLOp = 1;
1522  let HasSEWOp = 1;
1523}
1524
1525class VPseudoSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
1526      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
1528      RISCVVPseudo,
1529      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
1530  let mayLoad = 0;
1531  let mayStore = 1;
1532  let hasSideEffects = 0;
1533  let HasVLOp = 1;
1534  let HasSEWOp = 1;
1535  let HasDummyMask = 1;
1536}
1537
1538class VPseudoSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
1539      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR:$offset,
1541                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
1542      RISCVVPseudo,
1543      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
1544  let mayLoad = 0;
1545  let mayStore = 1;
1546  let hasSideEffects = 0;
1547  let HasVLOp = 1;
1548  let HasSEWOp = 1;
1549}
1550
1551class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
1552                             bits<4> NF, bit Ordered>:
1553      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass:$index,
1555                  AVL:$vl, ixlenimm:$sew),[]>,
1556      RISCVVPseudo,
1557      RISCVVSXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
1558  let mayLoad = 0;
1559  let mayStore = 1;
1560  let hasSideEffects = 0;
1561  let HasVLOp = 1;
1562  let HasSEWOp = 1;
1563  let HasDummyMask = 1;
1564}
1565
1566class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
1567                           bits<4> NF, bit Ordered>:
1568      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass:$index,
1570                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
1571      RISCVVPseudo,
1572      RISCVVSXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
1573  let mayLoad = 0;
1574  let mayStore = 1;
1575  let hasSideEffects = 0;
1576  let HasVLOp = 1;
1577  let HasSEWOp = 1;
1578}
1579
1580multiclass VPseudoUSLoad {
1581  foreach eew = EEWList in {
1582    foreach lmul = MxSet<eew>.m in {
1583      defvar LInfo = lmul.MX;
1584      defvar vreg = lmul.vrclass;
1585      let VLMul = lmul.value in {
1586        def "E" # eew # "_V_" # LInfo :
1587          VPseudoUSLoadNoMask<vreg, eew>,
1588          VLESched<eew>;
1589        def "E" # eew # "_V_" # LInfo # "_TU":
1590          VPseudoUSLoadNoMaskTU<vreg, eew>,
1591          VLESched<eew>;
1592        def "E" # eew # "_V_" # LInfo # "_MASK" :
1593          VPseudoUSLoadMask<vreg, eew>,
1594          VLESched<eew>;
1595      }
1596    }
1597  }
1598}
1599
1600multiclass VPseudoFFLoad {
1601  foreach eew = EEWList in {
1602    foreach lmul = MxSet<eew>.m in {
1603      defvar LInfo = lmul.MX;
1604      defvar vreg = lmul.vrclass;
1605      let VLMul = lmul.value in {
1606        def "E" # eew # "FF_V_" # LInfo:
1607          VPseudoUSLoadFFNoMask<vreg, eew>,
1608          VLFSched<eew>;
1609        def "E" # eew # "FF_V_" # LInfo # "_TU":
1610          VPseudoUSLoadFFNoMaskTU<vreg, eew>,
1611          VLFSched<eew>;
1612        def "E" # eew # "FF_V_" # LInfo # "_MASK":
1613          VPseudoUSLoadFFMask<vreg, eew>,
1614          VLFSched<eew>;
1615      }
1616    }
1617  }
1618}
1619
1620multiclass VPseudoLoadMask {
1621  foreach mti = AllMasks in {
1622    let VLMul = mti.LMul.value in {
1623      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
1624    }
1625  }
1626}
1627
1628multiclass VPseudoSLoad {
1629  foreach eew = EEWList in {
1630    foreach lmul = MxSet<eew>.m in {
1631      defvar LInfo = lmul.MX;
1632      defvar vreg = lmul.vrclass;
1633      let VLMul = lmul.value in {
1634        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
1635                                        VLSSched<eew>;
1636        def "E" # eew # "_V_" # LInfo # "_TU": VPseudoSLoadNoMaskTU<vreg, eew>,
1637                                        VLSSched<eew>;
1638        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>,
1639                                                  VLSSched<eew>;
1640      }
1641    }
1642  }
1643}
1644
1645multiclass VPseudoILoad<bit Ordered> {
1646  foreach eew = EEWList in {
1647    foreach sew = EEWList in {
1648      foreach lmul = MxSet<sew>.m in {
1649        defvar octuple_lmul = lmul.octuple;
1650        // Calculate emul = eew * lmul / sew
1651        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
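        // e.g. EEW=16, LMUL=4 (octuple 32), SEW=8: (16 * 32) >> log2(8) = 64,
        // i.e. EMUL=8.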
1652        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
1653          defvar LInfo = lmul.MX;
1654          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
1655          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
1656          defvar Vreg = lmul.vrclass;
1657          defvar IdxVreg = idx_lmul.vrclass;
1658          defvar HasConstraint = !ne(sew, eew);
1659          defvar Order = !if(Ordered, "O", "U");
1660          let VLMul = lmul.value in {
1661            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
1662              VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
1663              VLXSched<eew, Order>;
1664            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_TU":
1665              VPseudoILoadNoMaskTU<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
1666              VLXSched<eew, Order>;
1667            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
1668              VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
1669              VLXSched<eew, Order>;
1670          }
1671        }
1672      }
1673    }
1674  }
1675}
1676
1677multiclass VPseudoUSStore {
1678  foreach eew = EEWList in {
1679    foreach lmul = MxSet<eew>.m in {
1680      defvar LInfo = lmul.MX;
1681      defvar vreg = lmul.vrclass;
1682      let VLMul = lmul.value in {
1683        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
1684                                        VSESched<eew>;
1685        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
1686                                                  VSESched<eew>;
1687      }
1688    }
1689  }
1690}
1691
1692multiclass VPseudoStoreMask {
1693  foreach mti = AllMasks in {
1694    let VLMul = mti.LMul.value in {
1695      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
1696    }
1697  }
1698}
1699
1700multiclass VPseudoSStore {
1701  foreach eew = EEWList in {
1702    foreach lmul = MxSet<eew>.m in {
1703      defvar LInfo = lmul.MX;
1704      defvar vreg = lmul.vrclass;
1705      let VLMul = lmul.value in {
1706        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
1707                                        VSSSched<eew>;
1708        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
1709                                                  VSSSched<eew>;
1710      }
1711    }
1712  }
1713}
1714
1715multiclass VPseudoIStore<bit Ordered> {
1716  foreach eew = EEWList in {
1717    foreach sew = EEWList in {
1718      foreach lmul = MxSet<sew>.m in {
1719        defvar octuple_lmul = lmul.octuple;
1720        // Calculate emul = eew * lmul / sew
1721        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
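        // e.g. EEW=32, LMUL=1 (octuple 8), SEW=8: (32 * 8) >> log2(8) = 32,
        // i.e. EMUL=4.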
1722        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
1723          defvar LInfo = lmul.MX;
1724          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
1725          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
1726          defvar Vreg = lmul.vrclass;
1727          defvar IdxVreg = idx_lmul.vrclass;
1728          defvar Order = !if(Ordered, "O", "U");
1729          let VLMul = lmul.value in {
1730            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
1731              VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
1732              VSXSched<eew, Order>;
1733            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
1734              VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
1735              VSXSched<eew, Order>;
1736          }
1737        }
1738      }
1739    }
1740  }
1741}
1742
1743multiclass VPseudoVPOP_M {
  foreach mti = AllMasks in {
1746    let VLMul = mti.LMul.value in {
1747      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>,
1748                           Sched<[WriteVMPopV, ReadVMPopV, ReadVMPopV]>;
1749      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask,
1750                                     Sched<[WriteVMPopV, ReadVMPopV, ReadVMPopV]>;
1751    }
1752  }
1753}
1754
1755multiclass VPseudoV1ST_M {
  foreach mti = AllMasks in {
1758    let VLMul = mti.LMul.value in {
1759      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>,
1760                           Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMFFSV]>;
1761      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask,
1762                                     Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMFFSV]>;
1763    }
1764  }
1765}
1766
1767multiclass VPseudoVSFS_M {
1768  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in {
1771    let VLMul = mti.LMul.value in {
1772      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>,
1773                           Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
1774      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
1775                                     Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
1776    }
1777  }
1778}
1779
1780multiclass VPseudoVID_V {
1781  foreach m = MxList in {
1782    let VLMul = m.value in {
1783      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>,
1784                         Sched<[WriteVMIdxV, ReadVMask]>;
1785      def "_V_" # m.MX # "_TU": VPseudoNullaryNoMaskTU<m.vrclass>,
1786                                Sched<[WriteVMIdxV, ReadVMask]>;
1787      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>,
1788                                   Sched<[WriteVMIdxV, ReadVMask]>;
1789    }
1790  }
1791}
1792
multiclass VPseudoNullaryPseudoM<string BaseInst> {
1794  foreach mti = AllMasks in {
1795    let VLMul = mti.LMul.value in {
1796      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
1797    }
1798  }
1799}
1800
1801multiclass VPseudoVIOT_M {
1802  defvar constraint = "@earlyclobber $rd";
1803  foreach m = MxList in {
1804    let VLMul = m.value in {
1805      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
1806                       Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
1807      def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU<m.vrclass, VR, constraint>,
1808                               Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
1809      def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, VR, constraint>,
1810                                 Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
1811    }
1812  }
1813}
1814
1815multiclass VPseudoVCPR_V {
1816  foreach m = MxList in {
1817    let VLMul = m.value in
1818      def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
1819                             Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>;
1820  }
1821}
1822
1823multiclass VPseudoBinary<VReg RetClass,
1824                         VReg Op1Class,
1825                         DAGOperand Op2Class,
1826                         LMULInfo MInfo,
1827                         string Constraint = ""> {
1828  let VLMul = MInfo.value in {
1829    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1830                                             Constraint>;
1831    def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
1832                                                       Constraint>;
1833    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
1834                                                           Constraint>,
1835                                   RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
1836  }
1837}
1838
1839multiclass VPseudoBinaryM<VReg RetClass,
1840                          VReg Op1Class,
1841                          DAGOperand Op2Class,
1842                          LMULInfo MInfo,
1843                          string Constraint = ""> {
1844  let VLMul = MInfo.value in {
1845    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1846                                             Constraint>;
1847    let ForceTailAgnostic = true in
1848    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
1849                                                         Op2Class, Constraint>,
1850                                   RISCVMaskedPseudo</*MaskOpIdx*/ 3, /*HasTU*/ false>;
1851  }
1852}
1853
1854multiclass VPseudoBinaryEmul<VReg RetClass,
1855                             VReg Op1Class,
1856                             DAGOperand Op2Class,
1857                             LMULInfo lmul,
1858                             LMULInfo emul,
1859                             string Constraint = ""> {
1860  let VLMul = lmul.value in {
1861    def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1862                                                            Constraint>;
1863    def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
1864                                                                     Constraint>;
1865    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
1866                                                                          Constraint>,
1867                                                  RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
1868  }
1869}
1870
1871multiclass VPseudoTiedBinary<VReg RetClass,
1872                             DAGOperand Op2Class,
1873                             LMULInfo MInfo,
1874                             string Constraint = ""> {
1875  let VLMul = MInfo.value in {
1876    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
1877                                                          Constraint>;
1878    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
1879                                                         Constraint>;
1880  }
1881}
1882
1883multiclass VPseudoBinaryV_VV<string Constraint = ""> {
1884  foreach m = MxList in
1885    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
1886}
1887
1888// Similar to VPseudoBinaryV_VV, but uses MxListF.
1889multiclass VPseudoBinaryFV_VV<string Constraint = ""> {
1890  foreach m = MxListF in
1891    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
1892}
1893
1894multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
1895  foreach m = MxList in {
1896    foreach sew = EEWList in {
1897      defvar octuple_lmul = m.octuple;
1898      // emul = lmul * eew / sew
1899      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), log2<sew>.val);
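      // e.g. with eew=16 (as for vrgatherei16), SEW=8, LMUL=1 (octuple 8):
      // (8 * 16) >> log2(8) = 16, i.e. an index EMUL of 2.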
1900      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
1901        defvar emulMX = octuple_to_str<octuple_emul>.ret;
1902        defvar emul = !cast<LMULInfo>("V_" # emulMX);
1903        defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>,
1904                   Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV]>;
1905      }
1906    }
1907  }
1908}
1909
1910multiclass VPseudoBinaryV_VX<string Constraint = ""> {
1911  foreach m = MxList in
1912    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
1913}
1914
1915multiclass VPseudoVSLD1_VX<string Constraint = ""> {
1916  foreach m = MxList in
1917    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
1918                 Sched<[WriteVISlide1X, ReadVISlideV, ReadVISlideX, ReadVMask]>;
1919}
1920
1921multiclass VPseudoBinaryV_VF<string Constraint = ""> {
1922  foreach f = FPList in
1923    foreach m = f.MxList in
1924      defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
1925                                       f.fprclass, m, Constraint>;
1926}
1927
1928multiclass VPseudoVSLD1_VF<string Constraint = ""> {
1929  foreach f = FPList in
1930    foreach m = f.MxList in
1931      defm "_V" # f.FX :
1932        VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
1933        Sched<[WriteVFSlide1F, ReadVFSlideV, ReadVFSlideF, ReadVMask]>;
1934}
1935
1936multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
1937  foreach m = MxList in
1938    defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
1939}
1940
1941multiclass VPseudoVALU_MM {
1942  foreach m = MxList in
1943    let VLMul = m.value in {
1944      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "", /*DummyMask*/0>,
1945                          Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
1946    }
1947}
1948
// We use @earlyclobber here because of the spec's register group overlap
// rules:
// * When the destination EEW is smaller than the source EEW, overlap is
//   legal only in the lowest-numbered part of the source register group.
//   Otherwise, it is illegal.
// * When the destination EEW is greater than the source EEW and the source
//   EMUL is at least 1, overlap is legal only in the highest-numbered part
//   of the destination register group. Otherwise, it is illegal.
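// The spec's examples: with LMUL=1, vnsrl.wi v0, v0, 3 is legal but a
// destination of v1 is not; with LMUL=8, vzext.vf4 v0, v6 is legal but a
// source of v0, v2, or v4 is not. @earlyclobber conservatively forbids all
// overlap, which is always safe.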
1956multiclass VPseudoBinaryW_VV<list<LMULInfo> mxlist = MxListW> {
1957  foreach m = mxlist in
1958    defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
1959                             "@earlyclobber $rd">;
1960}
1961
1962multiclass VPseudoBinaryW_VX {
1963  foreach m = MxListW in
1964    defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
1965                               "@earlyclobber $rd">;
1966}
1967
1968multiclass VPseudoBinaryW_VF {
1969  foreach f = FPListW in
1970    foreach m = f.MxList in
1971      defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
1972                                       f.fprclass, m,
1973                                       "@earlyclobber $rd">;
1974}
1975
1976multiclass VPseudoBinaryW_WV<list<LMULInfo> mxlist = MxListW> {
1977  foreach m = mxlist in {
1978    defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
1979                             "@earlyclobber $rd">;
1980    defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
1981                                 "@earlyclobber $rd">;
1982  }
1983}
1984
1985multiclass VPseudoBinaryW_WX {
1986  foreach m = MxListW in
1987    defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
1988}
1989
1990multiclass VPseudoBinaryW_WF {
1991  foreach f = FPListW in
1992    foreach m = f.MxList in
1993      defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
1994                                       f.fprclass, m>;
1995}
1996
// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
// if the source and destination have an LMUL<=1. This matches the following
// overlap exception from the spec:
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group."
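// When the wide source fits in a single register (fractional destination
// LMUL), any overlap with the destination is necessarily in that
// lowest-numbered register, so no constraint is required.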
2002multiclass VPseudoBinaryV_WV {
2003  foreach m = MxListW in
2004    defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
2005                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
2006}
2007
2008multiclass VPseudoBinaryV_WX {
2009  foreach m = MxListW in
2010    defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
2011                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
2012}
2013
2014multiclass VPseudoBinaryV_WI {
2015  foreach m = MxListW in
2016    defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
2017                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
2018}
2019
// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0; these instructions use CarryIn == 1 and
// CarryOut == 0.
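// Below, the destination register class is therefore VR when a carry-out
// mask is produced, GetVRegNoV0 when there is a carry-in but no carry-out
// (so that vd != v0), and the unrestricted vector class otherwise.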
2023multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
2024                             string Constraint = ""> {
2025  foreach m = MxList in
2026    def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
2027      VPseudoBinaryCarryIn<!if(CarryOut, VR,
2028                           !if(!and(CarryIn, !not(CarryOut)),
2029                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2030                           m.vrclass, m.vrclass, m, CarryIn, Constraint>;
2031}
2032
2033multiclass VPseudoTiedBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
2034                                 string Constraint = ""> {
2035  foreach m = MxList in
2036    def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU" :
2037      VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
2038                               !if(!and(CarryIn, !not(CarryOut)),
2039                                   GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2040                               m.vrclass, m.vrclass, m, CarryIn, Constraint>;
2041}
2042
2043multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
2044                             string Constraint = ""> {
2045  foreach m = MxList in
2046    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
2047      VPseudoBinaryCarryIn<!if(CarryOut, VR,
2048                           !if(!and(CarryIn, !not(CarryOut)),
2049                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2050                           m.vrclass, GPR, m, CarryIn, Constraint>;
2051}
2052
2053multiclass VPseudoTiedBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
2054                                 string Constraint = ""> {
2055  foreach m = MxList in
2056    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
2057      VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
2058                               !if(!and(CarryIn, !not(CarryOut)),
2059                                   GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2060                               m.vrclass, GPR, m, CarryIn, Constraint>;
2061}
2062
2063multiclass VPseudoVMRG_FM {
2064  foreach f = FPList in
2065    foreach m = f.MxList in {
2066      def "_V" # f.FX # "M_" # m.MX :
2067        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2068                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
2069        Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;
2070      // Tied version to allow codegen control over the tail elements
2071      def "_V" # f.FX # "M_" # m.MX # "_TU":
2072        VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2073                                 m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
2074        Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;
2075    }
2076}
2077
2078multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
2079                             string Constraint = ""> {
2080  foreach m = MxList in
2081    def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
2082      VPseudoBinaryCarryIn<!if(CarryOut, VR,
2083                           !if(!and(CarryIn, !not(CarryOut)),
2084                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2085                           m.vrclass, simm5, m, CarryIn, Constraint>;
2086}
2087
2088multiclass VPseudoTiedBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
2089                                 string Constraint = ""> {
2090  foreach m = MxList in
2091    def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
2092      VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
2093                               !if(!and(CarryIn, !not(CarryOut)),
2094                                   GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2095                               m.vrclass, simm5, m, CarryIn, Constraint>;
2096}
2097
2098multiclass VPseudoUnaryVMV_V_X_I {
2099  foreach m = MxList in {
2100    let VLMul = m.value in {
2101      def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>,
2102                         Sched<[WriteVIMovV, ReadVIMovV]>;
2103      def "_X_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, GPR>,
2104                         Sched<[WriteVIMovX, ReadVIMovX]>;
2105      def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>,
2106                         Sched<[WriteVIMovI]>;
2107      def "_V_" # m.MX # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, m.vrclass>,
2108                         Sched<[WriteVIMovV, ReadVIMovV]>;
2109      def "_X_" # m.MX # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, GPR>,
2110                         Sched<[WriteVIMovX, ReadVIMovX]>;
2111      def "_I_" # m.MX # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, simm5>,
2112                         Sched<[WriteVIMovI]>;
2113    }
2114  }
2115}
2116
2117multiclass VPseudoVMV_F {
2118  foreach f = FPList in {
2119    foreach m = f.MxList in {
2120      let VLMul = m.value in {
2121        def "_" # f.FX # "_" # m.MX :
2122          VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>,
2123          Sched<[WriteVFMovV, ReadVFMovF]>;
2124        def "_" # f.FX # "_" # m.MX # "_TU":
2125          VPseudoUnaryNoDummyMaskTU<m.vrclass, f.fprclass>,
2126          Sched<[WriteVFMovV, ReadVFMovF]>;
2127      }
2128    }
2129  }
2130}
2131
2132multiclass VPseudoVCLS_V {
2133  foreach m = MxListF in {
2134    let VLMul = m.value in {
2135      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2136                         Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
2137      def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
2138                                Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
2139      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
2140                                   Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
2141    }
2142  }
2143}
2144
2145multiclass VPseudoVSQR_V {
2146  foreach m = MxListF in {
2147    let VLMul = m.value in {
2148      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2149                         Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
2150      def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
2151                                Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
2152      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
2153                                   Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
2154    }
2155  }
2156}
2157
2158multiclass VPseudoVRCP_V {
2159  foreach m = MxListF in {
2160    let VLMul = m.value in {
2161      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2162                         Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
2163      def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
2164                                Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
2165      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
2166                                   Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
2167    }
2168  }
2169}
2170
2171multiclass PseudoVEXT_VF2 {
2172  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF2 in {
2175    let VLMul = m.value in {
2176      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>,
2177                       Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2178      def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f2vrclass, constraints>,
2179                              Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2180      def "_" # m.MX # "_MASK" :
2181        VPseudoUnaryMaskTA<m.vrclass, m.f2vrclass, constraints>,
2182        RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
2183        Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2184    }
2185  }
2186}
2187
2188multiclass PseudoVEXT_VF4 {
2189  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF4 in {
2192    let VLMul = m.value in {
2193      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>,
2194                       Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2195      def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f4vrclass, constraints>,
2196                              Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2197      def "_" # m.MX # "_MASK" :
2198        VPseudoUnaryMaskTA<m.vrclass, m.f4vrclass, constraints>,
2199        RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
2200        Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2201    }
2202  }
2203}
2204
2205multiclass PseudoVEXT_VF8 {
2206  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF8 in {
2209    let VLMul = m.value in {
2210      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>,
2211                       Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2212      def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f8vrclass, constraints>,
2213                              Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2214      def "_" # m.MX # "_MASK" :
2215        VPseudoUnaryMaskTA<m.vrclass, m.f8vrclass, constraints>,
2216        RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
2217        Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
2218    }
2219  }
2220}
2221
// The destination EEW is 1 since "For the purposes of register group overlap
// constraints, mask elements have EEW=1."
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW differs from the source EEW, we need @earlyclobber
// to avoid overlap between the destination and source registers.
// We don't need @earlyclobber for LMUL<=1 since that matches the following
// overlap exception from the spec:
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group."
// With LMUL<=1 the source and destination each occupy a single register, so
// any overlap is in the lowest-numbered part.
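// For example, with LMUL=2 a compare writing mask register v1 while reading
// the source group v0-v1 would overlap the highest-numbered part of the
// source, which is illegal; @earlyclobber conservatively forbids any overlap.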
2233multiclass VPseudoBinaryM_VV<list<LMULInfo> mxlist = MxList> {
2234  foreach m = mxlist in
2235    defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
2236                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
2237}
2238
2239multiclass VPseudoBinaryM_VX {
2240  foreach m = MxList in
2241    defm "_VX" :
2242      VPseudoBinaryM<VR, m.vrclass, GPR, m,
2243                     !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
2244}
2245
2246multiclass VPseudoBinaryM_VF {
2247  foreach f = FPList in
2248    foreach m = f.MxList in
2249      defm "_V" # f.FX :
2250        VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
2251                       !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
2252}
2253
2254multiclass VPseudoBinaryM_VI {
2255  foreach m = MxList in
2256    defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
2257                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
2258}
2259
2260multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2261  defm "" : VPseudoBinaryV_VV<Constraint>,
2262            Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV, ReadVMask]>;
2263  defm "" : VPseudoBinaryV_VX<Constraint>,
2264            Sched<[WriteVGatherX, ReadVGatherV, ReadVGatherX, ReadVMask]>;
2265  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
2266            Sched<[WriteVGatherI, ReadVGatherV, ReadVMask]>;
2267}
2268
2269multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2270  defm "" : VPseudoBinaryV_VV<Constraint>,
2271            Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
2272  defm "" : VPseudoBinaryV_VX<Constraint>,
2273            Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
2274  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
2275            Sched<[WriteVSALUI, ReadVSALUV, ReadVMask]>;
2276}
2277
2279multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2280  defm "" : VPseudoBinaryV_VV<Constraint>,
2281            Sched<[WriteVShiftV, ReadVShiftV, ReadVShiftV, ReadVMask]>;
2282  defm "" : VPseudoBinaryV_VX<Constraint>,
2283            Sched<[WriteVShiftX, ReadVShiftV, ReadVShiftX, ReadVMask]>;
2284  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
2285            Sched<[WriteVShiftI, ReadVShiftV, ReadVMask]>;
2286}
2287
2288multiclass VPseudoVSSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2289  defm "" : VPseudoBinaryV_VV<Constraint>,
2290            Sched<[WriteVSShiftV, ReadVSShiftV, ReadVSShiftV, ReadVMask]>;
2291  defm "" : VPseudoBinaryV_VX<Constraint>,
2292            Sched<[WriteVSShiftX, ReadVSShiftV, ReadVSShiftX, ReadVMask]>;
2293  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
2294            Sched<[WriteVSShiftI, ReadVSShiftV, ReadVMask]>;
2295}
2296
2297multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2298  defm "" : VPseudoBinaryV_VV<Constraint>,
2299            Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
2300  defm "" : VPseudoBinaryV_VX<Constraint>,
2301            Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
2302  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
2303            Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
2304}
2305
2306multiclass VPseudoVSALU_VV_VX {
2307  defm "" : VPseudoBinaryV_VV,
2308            Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
2309  defm "" : VPseudoBinaryV_VX,
2310            Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
2311}
2312
2313multiclass VPseudoVSMUL_VV_VX {
2314  defm "" : VPseudoBinaryV_VV,
2315            Sched<[WriteVSMulV, ReadVSMulV, ReadVSMulV, ReadVMask]>;
2316  defm "" : VPseudoBinaryV_VX,
2317            Sched<[WriteVSMulX, ReadVSMulV, ReadVSMulX, ReadVMask]>;
2318}
2319
2320multiclass VPseudoVAALU_VV_VX {
2321  defm "" : VPseudoBinaryV_VV,
2322            Sched<[WriteVAALUV, ReadVAALUV, ReadVAALUV, ReadVMask]>;
2323  defm "" : VPseudoBinaryV_VX,
2324            Sched<[WriteVAALUX, ReadVAALUV, ReadVAALUX, ReadVMask]>;
2325}
2326
2327multiclass VPseudoVMINMAX_VV_VX {
2328  defm "" : VPseudoBinaryV_VV,
2329            Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
2330  defm "" : VPseudoBinaryV_VX,
2331            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
2332}
2333
2334multiclass VPseudoVMUL_VV_VX {
2335  defm "" : VPseudoBinaryV_VV,
2336            Sched<[WriteVIMulV, ReadVIMulV, ReadVIMulV, ReadVMask]>;
2337  defm "" : VPseudoBinaryV_VX,
2338            Sched<[WriteVIMulX, ReadVIMulV, ReadVIMulX, ReadVMask]>;
2339}
2340
2341multiclass VPseudoVDIV_VV_VX {
2342  defm "" : VPseudoBinaryV_VV,
2343            Sched<[WriteVIDivV, ReadVIDivV, ReadVIDivV, ReadVMask]>;
2344  defm "" : VPseudoBinaryV_VX,
2345            Sched<[WriteVIDivX, ReadVIDivV, ReadVIDivX, ReadVMask]>;
2346}
2347
2348multiclass VPseudoVFMUL_VV_VF {
2349  defm "" : VPseudoBinaryFV_VV,
2350            Sched<[WriteVFMulV, ReadVFMulV, ReadVFMulV, ReadVMask]>;
2351  defm "" : VPseudoBinaryV_VF,
2352            Sched<[WriteVFMulF, ReadVFMulV, ReadVFMulF, ReadVMask]>;
2353}
2354
2355multiclass VPseudoVFDIV_VV_VF {
2356  defm "" : VPseudoBinaryFV_VV,
2357            Sched<[WriteVFDivV, ReadVFDivV, ReadVFDivV, ReadVMask]>;
2358  defm "" : VPseudoBinaryV_VF,
2359            Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
2360}
2361
2362multiclass VPseudoVFRDIV_VF {
2363  defm "" : VPseudoBinaryV_VF,
2364            Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
2365}
2366
2367multiclass VPseudoVALU_VV_VX {
2368  defm "" : VPseudoBinaryV_VV,
2369            Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
2370  defm "" : VPseudoBinaryV_VX,
2371            Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
2372}
2373
2374multiclass VPseudoVSGNJ_VV_VF {
2375  defm "" : VPseudoBinaryFV_VV,
2376            Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>;
2377  defm "" : VPseudoBinaryV_VF,
2378            Sched<[WriteVFSgnjF, ReadVFSgnjV, ReadVFSgnjF, ReadVMask]>;
2379}
2380
2381multiclass VPseudoVMAX_VV_VF {
2382  defm "" : VPseudoBinaryFV_VV,
2383            Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
2384  defm "" : VPseudoBinaryV_VF,
2385            Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
2386}
2387
2388multiclass VPseudoVALU_VV_VF {
2389  defm "" : VPseudoBinaryFV_VV,
2390            Sched<[WriteVFALUV, ReadVFALUV, ReadVFALUV, ReadVMask]>;
2391  defm "" : VPseudoBinaryV_VF,
2392            Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
2393}
2394
2395multiclass VPseudoVALU_VF {
2396  defm "" : VPseudoBinaryV_VF,
2397            Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
2398}
2399
2400multiclass VPseudoVALU_VX_VI<Operand ImmType = simm5> {
2401  defm "" : VPseudoBinaryV_VX,
2402            Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
2403  defm "" : VPseudoBinaryV_VI<ImmType>,
2404            Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
2405}
2406
2407multiclass VPseudoVWALU_VV_VX {
2408  defm "" : VPseudoBinaryW_VV,
2409            Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
2410  defm "" : VPseudoBinaryW_VX,
2411            Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>;
2412}
2413
2414multiclass VPseudoVWMUL_VV_VX {
2415  defm "" : VPseudoBinaryW_VV,
2416            Sched<[WriteVIWMulV, ReadVIWMulV, ReadVIWMulV, ReadVMask]>;
2417  defm "" : VPseudoBinaryW_VX,
2418            Sched<[WriteVIWMulX, ReadVIWMulV, ReadVIWMulX, ReadVMask]>;
2419}
2420
2421multiclass VPseudoVWMUL_VV_VF {
2422  defm "" : VPseudoBinaryW_VV<MxListFW>,
2423            Sched<[WriteVFWMulV, ReadVFWMulV, ReadVFWMulV, ReadVMask]>;
2424  defm "" : VPseudoBinaryW_VF,
2425            Sched<[WriteVFWMulF, ReadVFWMulV, ReadVFWMulF, ReadVMask]>;
2426}
2427
2428multiclass VPseudoVWALU_WV_WX {
2429  defm "" : VPseudoBinaryW_WV,
2430            Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
2431  defm "" : VPseudoBinaryW_WX,
2432            Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>;
2433}
2434
2435multiclass VPseudoVFWALU_VV_VF {
2436  defm "" : VPseudoBinaryW_VV<MxListFW>,
2437            Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
2438  defm "" : VPseudoBinaryW_VF,
2439            Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
2440}
2441
2442multiclass VPseudoVFWALU_WV_WF {
2443  defm "" : VPseudoBinaryW_WV<MxListFW>,
2444            Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
2445  defm "" : VPseudoBinaryW_WF,
2446            Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
2447}
2448
2449multiclass VPseudoVMRG_VM_XM_IM {
2450  defm "" : VPseudoBinaryV_VM,
2451            Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
2452  defm "" : VPseudoBinaryV_XM,
2453            Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
2454  defm "" : VPseudoBinaryV_IM,
2455            Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
2456  // Tied versions to allow codegen control over the tail elements
2457  defm "" : VPseudoTiedBinaryV_VM,
2458            Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
2459  defm "" : VPseudoTiedBinaryV_XM,
2460            Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
2461  defm "" : VPseudoTiedBinaryV_IM,
2462            Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
2463}
2464
2465multiclass VPseudoVCALU_VM_XM_IM {
2466  defm "" : VPseudoBinaryV_VM,
2467            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
2468  defm "" : VPseudoBinaryV_XM,
2469            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
2470  defm "" : VPseudoBinaryV_IM,
2471            Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
2472  // Tied versions to allow codegen control over the tail elements
2473  defm "" : VPseudoTiedBinaryV_VM,
2474            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
2475  defm "" : VPseudoTiedBinaryV_XM,
2476            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
2477  defm "" : VPseudoTiedBinaryV_IM,
2478            Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
2479}
2480
2481multiclass VPseudoVCALU_VM_XM {
2482  defm "" : VPseudoBinaryV_VM,
2483            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
2484  defm "" : VPseudoBinaryV_XM,
2485            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
2486  // Tied versions to allow codegen control over the tail elements
2487  defm "" : VPseudoTiedBinaryV_VM,
2488            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
2489  defm "" : VPseudoTiedBinaryV_XM,
2490            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
2491}
2492
2493multiclass VPseudoVCALUM_VM_XM_IM<string Constraint> {
2494  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
2495            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
2496  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
2497            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
2498  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
2499            Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
2500}
2501
2502multiclass VPseudoVCALUM_VM_XM<string Constraint> {
2503  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
2504            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
2505  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
2506            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
2507}
2508
2509multiclass VPseudoVCALUM_V_X_I<string Constraint> {
2510  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
2511            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
2512  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
2513            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
2514  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
2515            Sched<[WriteVICALUI, ReadVIALUCV]>;
2516}
2517
2518multiclass VPseudoVCALUM_V_X<string Constraint> {
2519  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
2520            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
2521  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
2522            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
2523}
2524
2525multiclass VPseudoVNCLP_WV_WX_WI {
2526  defm "" : VPseudoBinaryV_WV,
2527            Sched<[WriteVNClipV, ReadVNClipV, ReadVNClipV, ReadVMask]>;
2528  defm "" : VPseudoBinaryV_WX,
2529            Sched<[WriteVNClipX, ReadVNClipV, ReadVNClipX, ReadVMask]>;
2530  defm "" : VPseudoBinaryV_WI,
2531            Sched<[WriteVNClipI, ReadVNClipV, ReadVMask]>;
2532}
2533
2534multiclass VPseudoVNSHT_WV_WX_WI {
2535  defm "" : VPseudoBinaryV_WV,
2536            Sched<[WriteVNShiftV, ReadVNShiftV, ReadVNShiftV, ReadVMask]>;
2537  defm "" : VPseudoBinaryV_WX,
2538            Sched<[WriteVNShiftX, ReadVNShiftV, ReadVNShiftX, ReadVMask]>;
2539  defm "" : VPseudoBinaryV_WI,
2540            Sched<[WriteVNShiftI, ReadVNShiftV, ReadVMask]>;
2541}
2542
2543multiclass VPseudoTernary<VReg RetClass,
2544                          RegisterClass Op1Class,
2545                          DAGOperand Op2Class,
2546                          LMULInfo MInfo,
2547                          string Constraint = ""> {
2548  let VLMul = MInfo.value in {
2549    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
2550    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
2551  }
2552}
2553
2554multiclass VPseudoTernaryNoMaskNoPolicy<VReg RetClass,
2555                                        RegisterClass Op1Class,
2556                                        DAGOperand Op2Class,
2557                                        LMULInfo MInfo,
2558                                        string Constraint = ""> {
2559  let VLMul = MInfo.value in {
2560    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
2561    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
2562                                                           Constraint>;
2564  }
2565}
2566
2567multiclass VPseudoTernaryWithPolicy<VReg RetClass,
2568                                    RegisterClass Op1Class,
2569                                    DAGOperand Op2Class,
2570                                    LMULInfo MInfo,
2571                                    string Constraint = "",
2572                                    bit Commutable = 0> {
2573  let VLMul = MInfo.value in {
2574    let isCommutable = Commutable in
2575    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
2576    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>;
2577  }
2578}
2579
2580multiclass VPseudoTernaryV_VV_AAXA<string Constraint = "",
2581                                   list<LMULInfo> mxlist = MxList> {
2582  foreach m = mxlist in {
2583    defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
2584                                        Constraint, /*Commutable*/1>;
2585  }
2586}
2587
2588multiclass VPseudoVSLDV_VX<string Constraint = ""> {
2589  foreach m = MxList in
2590    defm _VX : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
2591}
2592
2593multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
2594  foreach m = MxList in
2595    defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
2596                                          Constraint, /*Commutable*/1>;
2597}
2598
2599multiclass VPseudoTernaryV_VF_AAXA<string Constraint = ""> {
2600  foreach f = FPList in
2601    foreach m = f.MxList in
2602      defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
2603                                                  m.vrclass, m, Constraint,
2604                                                  /*Commutable*/1>;
2605}
2606
2607multiclass VPseudoTernaryW_VV<list<LMULInfo> mxlist = MxListW> {
2608  defvar constraint = "@earlyclobber $rd";
2609  foreach m = mxlist in
2610    defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
2611                                        constraint>;
2612}
2613
2614multiclass VPseudoTernaryW_VX {
2615  defvar constraint = "@earlyclobber $rd";
2616  foreach m = MxListW in
2617    defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
2618                                          constraint>;
2619}
2620
2621multiclass VPseudoTernaryW_VF {
2622  defvar constraint = "@earlyclobber $rd";
2623  foreach f = FPListW in
2624    foreach m = f.MxList in
2625      defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
2626                                                  m.vrclass, m, constraint>;
2627}
2628
2629multiclass VPseudoVSLDV_VI<Operand ImmType = simm5, string Constraint = ""> {
2630  foreach m = MxList in
2631    defm _VI : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
2632}
2633
2634multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
2635  defm "" : VPseudoTernaryV_VV_AAXA<Constraint>,
2636            Sched<[WriteVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVMask]>;
2637  defm "" : VPseudoTernaryV_VX_AAXA<Constraint>,
2638            Sched<[WriteVIMulAddX, ReadVIMulAddV, ReadVIMulAddV, ReadVIMulAddX, ReadVMask]>;
2639}
2640
2641multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
2642  defm "" : VPseudoTernaryV_VV_AAXA<Constraint, MxListF>,
2643            Sched<[WriteVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVMask]>;
2644  defm "" : VPseudoTernaryV_VF_AAXA<Constraint>,
2645            Sched<[WriteVFMulAddF, ReadVFMulAddV, ReadVFMulAddV, ReadVFMulAddF, ReadVMask]>;
2646}
2647
2648multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2649  defm "" : VPseudoVSLDV_VX<Constraint>,
2650            Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideV, ReadVISlideX, ReadVMask]>;
2651  defm "" : VPseudoVSLDV_VI<ImmType, Constraint>,
2652            Sched<[WriteVISlideI, ReadVISlideV, ReadVISlideV, ReadVMask]>;
2653}
2654
2655multiclass VPseudoVWMAC_VV_VX {
2656  defm "" : VPseudoTernaryW_VV,
2657            Sched<[WriteVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVMask]>;
2658  defm "" : VPseudoTernaryW_VX,
2659            Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
2660}
2661
2662multiclass VPseudoVWMAC_VX {
2663  defm "" : VPseudoTernaryW_VX,
2664            Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
2665}
2666
2667multiclass VPseudoVWMAC_VV_VF {
2668  defm "" : VPseudoTernaryW_VV<MxListFW>,
2669            Sched<[WriteVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVMask]>;
2670  defm "" : VPseudoTernaryW_VF,
2671            Sched<[WriteVFWMulAddF, ReadVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddF, ReadVMask]>;
2672}
2673
2674multiclass VPseudoVCMPM_VV_VX_VI {
2675  defm "" : VPseudoBinaryM_VV,
2676            Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
2677  defm "" : VPseudoBinaryM_VX,
2678            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
2679  defm "" : VPseudoBinaryM_VI,
2680            Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
2681}
2682
2683multiclass VPseudoVCMPM_VV_VX {
2684  defm "" : VPseudoBinaryM_VV,
2685            Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
2686  defm "" : VPseudoBinaryM_VX,
2687            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
2688}
2689
2690multiclass VPseudoVCMPM_VV_VF {
2691  defm "" : VPseudoBinaryM_VV<MxListF>,
2692            Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
2693  defm "" : VPseudoBinaryM_VF,
2694            Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
2695}
2696
2697multiclass VPseudoVCMPM_VF {
2698  defm "" : VPseudoBinaryM_VF,
2699            Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
2700}
2701
2702multiclass VPseudoVCMPM_VX_VI {
2703  defm "" : VPseudoBinaryM_VX,
2704            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
2705  defm "" : VPseudoBinaryM_VI,
2706            Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
2707}
2708
2709multiclass VPseudoVRED_VS {
2710  foreach m = MxList in {
2711    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
2712               Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV, ReadVIRedV, ReadVMask]>;
2713  }
2714}
2715
2716multiclass VPseudoVWRED_VS {
2717  foreach m = MxList in {
2718    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
2719               Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVMask]>;
2720  }
2721}
2722
2723multiclass VPseudoVFRED_VS {
2724  foreach m = MxListF in {
2725    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
2726               Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV, ReadVFRedV, ReadVMask]>;
2727  }
2728}
2729
2730multiclass VPseudoVFREDO_VS {
2731  foreach m = MxListF in {
2732    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
2733               Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVMask]>;
2734  }
2735}
2736
2737multiclass VPseudoVFWRED_VS {
2738  foreach m = MxListF in {
2739    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
2740               Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVMask]>;
2741  }
2742}
2743
2744multiclass VPseudoConversion<VReg RetClass,
2745                             VReg Op1Class,
2746                             LMULInfo MInfo,
2747                             string Constraint = ""> {
2748  let VLMul = MInfo.value in {
2749    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
2750    def "_" # MInfo.MX # "_TU": VPseudoUnaryNoMaskTU<RetClass, Op1Class, Constraint>;
2751    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA<RetClass, Op1Class,
2752                                                      Constraint>,
2753                                   RISCVMaskedPseudo</*MaskOpIdx*/ 2>;
2754  }
2755}
2756
2757multiclass VPseudoVCVTI_V {
2758  foreach m = MxListF in
2759    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
2760              Sched<[WriteVFCvtFToIV, ReadVFCvtFToIV, ReadVMask]>;
2761}
2762
2763multiclass VPseudoVCVTF_V {
2764  foreach m = MxListF in
2765    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
2766              Sched<[WriteVFCvtIToFV, ReadVFCvtIToFV, ReadVMask]>;
2767}
2768
2769multiclass VPseudoConversionW_V {
2770  defvar constraint = "@earlyclobber $rd";
2771  foreach m = MxListW in
2772    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>;
2773}
2774
2775multiclass VPseudoVWCVTI_V {
2776  defvar constraint = "@earlyclobber $rd";
2777  foreach m = MxListFW in
2778    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
2779              Sched<[WriteVFWCvtFToIV, ReadVFWCvtFToIV, ReadVMask]>;
2780}
2781
2782multiclass VPseudoVWCVTF_V {
2783  defvar constraint = "@earlyclobber $rd";
2784  foreach m = MxListW in
2785    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
2786              Sched<[WriteVFWCvtIToFV, ReadVFWCvtIToFV, ReadVMask]>;
2787}
2788
2789multiclass VPseudoVWCVTD_V {
2790  defvar constraint = "@earlyclobber $rd";
2791  foreach m = MxListFW in
2792    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
2793              Sched<[WriteVFWCvtFToFV, ReadVFWCvtFToFV, ReadVMask]>;
2794}
2795
2796multiclass VPseudoVNCVTI_W {
2797  defvar constraint = "@earlyclobber $rd";
2798  foreach m = MxListW in
2799    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
2800              Sched<[WriteVFNCvtFToIV, ReadVFNCvtFToIV, ReadVMask]>;
2801}
2802
2803multiclass VPseudoVNCVTF_W {
2804  defvar constraint = "@earlyclobber $rd";
2805  foreach m = MxListFW in
2806    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
2807              Sched<[WriteVFNCvtIToFV, ReadVFNCvtIToFV, ReadVMask]>;
2808}
2809
2810multiclass VPseudoVNCVTD_W {
2811  defvar constraint = "@earlyclobber $rd";
2812  foreach m = MxListFW in
2813    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
2814              Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
2815}
2816
2817multiclass VPseudoUSSegLoad {
2818  foreach eew = EEWList in {
2819    foreach lmul = MxSet<eew>.m in {
2820      defvar LInfo = lmul.MX;
2821      let VLMul = lmul.value in {
2822        foreach nf = NFSet<lmul>.L in {
2823          defvar vreg = SegRegClass<lmul, nf>.RC;
2824          def nf # "E" # eew # "_V_" # LInfo :
2825            VPseudoUSSegLoadNoMask<vreg, eew, nf>;
2826          def nf # "E" # eew # "_V_" # LInfo # "_TU" :
2827            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>;
2828          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
2829            VPseudoUSSegLoadMask<vreg, eew, nf>;
2830        }
2831      }
2832    }
2833  }
2834}

multiclass VPseudoUSSegLoadFF {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "FF_V_" # LInfo :
            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "FF_V_" # LInfo # "_TU" :
            VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>;
          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadFFMask<vreg, eew, nf>;
        }
      }
    }
  }
}

multiclass VPseudoSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_TU" : VPseudoSSegLoadNoMaskTU<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>;
        }
      }
    }
  }
}

multiclass VPseudoISegLoad<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
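        // All quantities are in eighths ("octuples"), e.g. LMUL=1 has
        // octuple_lmul=8. For idx_eew=16, LMUL=1, SEW=8 this gives
        // octuple_emul = (16 * 8) >> log2(8) = 16, i.e. EMUL=2. The guard
        // below keeps EMUL within the legal range [1/8, 8].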
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = val_lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                      nf, Ordered>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_TU" :
                VPseudoISegLoadNoMaskTU<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                        nf, Ordered>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                    nf, Ordered>;
            }
          }
        }
      }
    }
  }
}

multiclass VPseudoUSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>;
        }
      }
    }
  }
}

multiclass VPseudoSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>;
        }
      }
    }
  }
}

multiclass VPseudoISegStore<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
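        // Same octuple arithmetic as in VPseudoISegLoad above, e.g.
        // idx_eew=8, LMUL=2 (octuple 16), SEW=32 gives (8 * 16) >> 5 = 4,
        // i.e. EMUL=1/2.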
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = val_lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                       nf, Ordered>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                     nf, Ordered>;
            }
          }
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Helpers to define the intrinsic patterns.
//===----------------------------------------------------------------------===//
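//
// A sketch of how these classes are used (hypothetical instantiation;
// concrete uses follow later in this file):
//
//   def : VPatUnaryNoMask<"int_riscv_vfsqrt", "PseudoVFSQRT", "V",
//                         vti.Vector, vti.Vector, vti.Log2SEW,
//                         vti.LMul, vti.RegClass>;
//
// would match the unmasked form of the intrinsic and select the
// corresponding PseudoVFSQRT_V_<MX> pseudo defined above.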

class VPatUnaryNoMask<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type undef),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew)>;

class VPatUnaryNoMaskTU<string intrinsic_name,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op2_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew)>;

class VPatUnaryMask<string intrinsic_name,
                    string inst,
                    string kind,
                    ValueType result_type,
                    ValueType op2_type,
                    ValueType mask_type,
                    int sew,
                    LMULInfo vlmul,
                    VReg result_reg_class,
                    VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

class VPatUnaryMaskTA<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;

class VPatMaskUnaryNoMask<string intrinsic_name,
                          string inst,
                          MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                (mti.Mask VR:$rs2),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX)
                (mti.Mask VR:$rs2),
                GPR:$vl, mti.Log2SEW)>;

class VPatMaskUnaryMask<string intrinsic_name,
                        string inst,
                        MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

class VPatUnaryAnyMask<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   GPR:$vl, sew)>;

class VPatBinaryM<string intrinsic_name,
                  string inst,
                  ValueType result_type,
                  ValueType op1_type,
                  ValueType op2_type,
                  int sew,
                  VReg op1_reg_class,
                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

class VPatBinaryNoMaskTA<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         int sew,
                         VReg op1_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

class VPatBinaryNoMaskTU<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         int sew,
                         VReg result_reg_class,
                         VReg op1_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TU")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

// Same as above but source operands are swapped.
class VPatBinaryNoMaskSwapped<string intrinsic_name,
                              string inst,
                              ValueType result_type,
                              ValueType op1_type,
                              ValueType op2_type,
                              int sew,
                              VReg op1_reg_class,
                              DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

class VPatBinaryMask<string intrinsic_name,
                     string inst,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int sew,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

class VPatBinaryMaskTA<string intrinsic_name,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       VReg result_reg_class,
                       VReg op1_reg_class,
                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;

// Same as above but source operands are swapped.
class VPatBinaryMaskSwapped<string intrinsic_name,
                            string inst,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            VReg result_reg_class,
                            VReg op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

class VPatTiedBinaryNoMask<string intrinsic_name,
                           string inst,
                           ValueType result_type,
                           ValueType op2_type,
                           int sew,
                           VReg result_reg_class,
                           DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
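
// The "_TIED" pseudos tie the result-typed source register to the
// destination, so the pattern supplies it only once; e.g. a pseudo named
// along the lines of PseudoVWADD_WV_M1_TIED would implement vwadd.wv with
// the destination register group doubling as the wide source.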

class VPatTiedBinaryNoMaskTU<string intrinsic_name,
                             string inst,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             VReg result_reg_class,
                             DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;

class VPatTiedBinaryMask<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int sew,
                         VReg result_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;

class VPatTernaryNoMask<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew)>;

class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  int sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  RegisterClass op1_reg_class,
                                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew, (XLenVT timm:$policy))>;

class VPatTernaryMask<string intrinsic,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      RegisterClass op1_reg_class,
                      DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew)>;

class VPatTernaryMaskPolicy<string intrinsic,
                            string inst,
                            string kind,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
                            VReg result_reg_class,
                            RegisterClass op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew, (XLenVT timm:$policy))>;

multiclass VPatUnaryS_M<string intrinsic_name,
                        string inst>
{
  foreach mti = AllMasks in {
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
                      (mti.Mask VR:$rs1), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
                      GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
                      (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}

multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
                           vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass,
                           vti.RegClass>;
  }
}

multiclass VPatUnaryM_M<string intrinsic,
                        string inst>
{
  foreach mti = AllMasks in {
    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
  }
}

multiclass VPatUnaryV_M<string intrinsic, string instruction>
{
  foreach vti = AllIntegerVectors in {
    def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, VR>;
    def : VPatUnaryNoMaskTU<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
    def : VPatUnaryMaskTA<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                          vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
  }
}

multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
                         list<VTypeInfoToFraction> fractionList>
{
  foreach vtiTofti = fractionList in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    def : VPatUnaryNoMask<intrinsic, instruction, suffix,
                          vti.Vector, fti.Vector,
                          vti.Log2SEW, vti.LMul, fti.RegClass>;
    def : VPatUnaryNoMaskTU<intrinsic, instruction, suffix,
                            vti.Vector, fti.Vector,
                            vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
    def : VPatUnaryMaskTA<intrinsic, instruction, suffix,
                          vti.Vector, fti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
  }
}

multiclass VPatUnaryV_V<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryNoMask<intrinsic, instruction, "V",
                          vti.Vector, vti.Vector,
                          vti.Log2SEW, vti.LMul, vti.RegClass>;
    def : VPatUnaryNoMaskTU<intrinsic, instruction, "V",
                            vti.Vector, vti.Vector,
                            vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
    def : VPatUnaryMaskTA<intrinsic, instruction, "V",
                          vti.Vector, vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
  }
}

multiclass VPatNullaryV<string intrinsic, string instruction>
{
  foreach vti = AllIntegerVectors in {
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                          (vti.Vector undef),
                          VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                          GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                          (vti.Vector vti.RegClass:$merge),
                          VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_TU")
                          vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                          (vti.Vector vti.RegClass:$merge),
                          (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                          vti.RegClass:$merge, (vti.Mask V0),
                          GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  }
}

multiclass VPatNullaryM<string intrinsic, string inst> {
  foreach mti = AllMasks in
    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
                        VLOpFrag)),
                        (!cast<Instruction>(inst#"_M_"#mti.BX)
                        GPR:$vl, mti.Log2SEW)>;
}

multiclass VPatBinaryM<string intrinsic,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       VReg result_reg_class,
                       VReg op1_reg_class,
                       DAGOperand op2_kind>
{
  def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type,
                    sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
                       mask_type, sew, result_reg_class, op1_reg_class,
                       op2_kind>;
}

multiclass VPatBinaryTA<string intrinsic,
                        string inst,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int sew,
                        VReg result_reg_class,
                        VReg op1_reg_class,
                        DAGOperand op2_kind>
{
  def : VPatBinaryNoMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                           sew, op1_reg_class, op2_kind>;
  def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
                           sew, result_reg_class, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                         mask_type, sew, result_reg_class, op1_reg_class,
                         op2_kind>;
}

multiclass VPatBinarySwapped<string intrinsic,
                             string inst,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             VReg result_reg_class,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                                sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                              mask_type, sew, result_reg_class, op1_reg_class,
                              op2_kind>;
}

multiclass VPatBinaryCarryInTAIL<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 VReg op1_reg_class,
                                 DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (result_type undef),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}

multiclass VPatBinaryCarryIn<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}

multiclass VPatBinaryMaskOut<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         GPR:$vl, sew)>;
}

multiclass VPatConversionTA<string intrinsic,
                            string inst,
                            string kind,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
                            VReg result_reg_class,
                            VReg op1_reg_class>
{
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                        sew, vlmul, op1_reg_class>;
  def : VPatUnaryNoMaskTU<intrinsic, inst, kind, result_type, op1_type,
                          sew, vlmul, result_reg_class, op1_reg_class>;
  def : VPatUnaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
}

multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar ivti = GetIntVTypeInfo<vti>.Vti;
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.RegClass>;
  }
}

multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
                                  int eew, list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // emul = lmul * eew / sew
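    // e.g. for an EEW=16 index operand (vrgatherei16.vv) at SEW=32,
    // LMUL=1 (octuple 8): octuple_emul = (8 * 16) >> 5 = 4, i.e. the
    // index operand uses EMUL=1/2.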
    defvar vlmul = vti.LMul;
    defvar octuple_lmul = vlmul.octuple;
    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
      defvar emul_str = octuple_to_str<octuple_emul>.ret;
      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
      defm : VPatBinaryTA<intrinsic, inst,
                          vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                          vti.Log2SEW, vti.RegClass,
                          vti.RegClass, ivti.RegClass>;
    }
  }
}

multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                        vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryTA<intrinsic, instruction # "_VX_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, GPR>;
}

multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, Operand imm_type> {
  foreach vti = vtilist in
    defm : VPatBinaryTA<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, imm_type>;
}

multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
  foreach mti = AllMasks in
    def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
                      mti.Mask, mti.Mask, mti.Mask,
                      mti.Log2SEW, VR, VR>;
}

multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, Vti.RegClass>;
  }
}

multiclass VPatBinaryW_VX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, Vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryW_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Vti.Vector,
                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                             Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
                             Wti.RegClass, Wti.RegClass, Vti.RegClass>;
    let AddedComplexity = 1 in {
    def : VPatTiedBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                 Wti.Vector, Vti.Vector,
                                 Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                             Wti.Vector, Vti.Vector, Vti.Mask,
                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    }
    def : VPatBinaryMaskTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                           Vti.Log2SEW, Wti.RegClass,
                           Wti.RegClass, Vti.RegClass>;
  }
}

multiclass VPatBinaryW_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "W"#Vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Wti.RegClass, Vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryV_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, Vti.RegClass>;
  }
}

multiclass VPatBinaryV_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "W"#Vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, Vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryV_WI<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, uimm5>;
  }
}

multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_XM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction,
                             "V"#vti.ScalarSuffix#"M",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Scalar, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.ScalarRegClass>;
}

multiclass VPatBinaryV_IM<string intrinsic, string instruction,
                          bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, XLenVT, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}

multiclass VPatBinaryV_VM_TAIL<string intrinsic, string instruction,
                               bit CarryOut = 0,
                               list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VVM",
                                 !if(CarryOut, vti.Mask, vti.Vector),
                                 vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_XM_TAIL<string intrinsic, string instruction,
                               bit CarryOut = 0,
                               list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix#"M",
                                 !if(CarryOut, vti.Mask, vti.Vector),
                                 vti.Vector, vti.Scalar, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.ScalarRegClass>;
}

multiclass VPatBinaryV_IM_TAIL<string intrinsic, string instruction,
                               bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VIM",
                                 !if(CarryOut, vti.Mask, vti.Vector),
                                 vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul,
                                 vti.RegClass, vti.RegClass, simm5>;
}

multiclass VPatBinaryV_V<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
                             vti.Mask, vti.Vector, vti.Vector,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_X<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, GPR>;
}

multiclass VPatBinaryV_I<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}

multiclass VPatBinaryM_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryM<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.RegClass>;
}

multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, VR,
                             vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryM_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinaryM<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryM_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryM<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, XLenVT, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, simm5>;
}

multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
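
// For example (sketch; concrete instantiations accompany the instruction
// definitions below):
//   defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD",
//                               AllIntegerVectors>;
// wires the vv/vx/vi forms of the intrinsic to PseudoVADD_{VV,VX,VI}_<MX>.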

multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;

multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
      VPatBinaryW_WX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>,
      VPatBinaryV_IM_TAIL<intrinsic, instruction>;

multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>;
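
// Sketch: a mask-producing carry-in op such as vmadc would be hooked up
// with something like
//   defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
// selecting PseudoVMADC_{VVM,VXM,VIM}_<MX>, whose result is a mask
// (CarryOut=1).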
3983
3984multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
3985    : VPatBinaryV_V<intrinsic, instruction>,
3986      VPatBinaryV_X<intrinsic, instruction>,
3987      VPatBinaryV_I<intrinsic, instruction>;
3988
3989multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
3990    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
3991      VPatBinaryV_XM_TAIL<intrinsic, instruction>;
3992
3993multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
3994    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
3995      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;
3996
3997multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
3998    : VPatBinaryV_V<intrinsic, instruction>,
3999      VPatBinaryV_X<intrinsic, instruction>;

multiclass VPatTernary<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       RegisterClass op1_reg_class,
                       DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                          sew, vlmul, result_reg_class, op1_reg_class,
                          op2_kind>;
  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                        op2_kind>;
}

multiclass VPatTernaryNoMaskNoPolicy<string intrinsic,
                                     string inst,
                                     string kind,
                                     ValueType result_type,
                                     ValueType op1_type,
                                     ValueType op2_type,
                                     ValueType mask_type,
                                     int sew,
                                     LMULInfo vlmul,
                                     VReg result_reg_class,
                                     RegisterClass op1_reg_class,
                                     DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                          sew, vlmul, result_reg_class, op1_reg_class,
                          op2_kind>;
  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                              op2_kind>;
}

multiclass VPatTernaryWithPolicy<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 RegisterClass op1_reg_class,
                                 DAGOperand op2_kind> {
  def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
                                    op2_type, sew, vlmul, result_reg_class,
                                    op1_reg_class, op2_kind>;
  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                              op2_kind>;
}

multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}

multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, GPR>;
}

multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.ScalarRegClass, vti.RegClass>;
}

multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand Imm_type> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, Imm_type>;
}

multiclass VPatTernaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 wti.Vector, vti.Vector, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}

multiclass VPatTernaryW_VX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 wti.Vector, vti.Scalar, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
  }
}

multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist>
    : VPatTernaryV_VV_AAXA<intrinsic, instruction, vtilist>,
      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;

multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist, Operand Imm_type = simm5>
    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;

multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist>
    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
      VPatTernaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;

multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    defm : VPatTernary<intrinsic, instruction, "VS",
                       vectorM1.Vector, vti.Vector,
                       vectorM1.Vector, vti.Mask,
                       vti.Log2SEW, vti.LMul,
                       VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
    defm : VPatTernary<intrinsic, instruction, "VS",
                       gvti.VectorM1, gvti.Vector,
                       gvti.VectorM1, gvti.Mask,
                       gvti.Log2SEW, gvti.LMul,
                       VR, gvti.RegClass, VR>;
  }
}

multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      defm : VPatTernary<intrinsic, instruction, "VS",
                         wtiM1.Vector, vti.Vector,
                         wtiM1.Vector, vti.Mask,
                         vti.Log2SEW, vti.LMul,
                         wtiM1.RegClass, vti.RegClass,
                         wtiM1.RegClass>;
    }
  }
}

multiclass VPatConversionVI_VF<string intrinsic,
                               string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                            fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}

multiclass VPatConversionVF_VI<string intrinsic,
                               string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
                            ivti.LMul, fvti.RegClass, ivti.RegClass>;
  }
}

multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                            fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}

multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
                            vti.LMul, fwti.RegClass, vti.RegClass>;
  }
}

multiclass VPatConversionWF_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
                            fvti.LMul, fwti.RegClass, fvti.RegClass>;
  }
}

multiclass VPatConversionVI_WF<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "W",
                            vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                            vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}

multiclass VPatConversionVF_WI<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "W",
                            fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
                            fvti.LMul, fvti.RegClass, iwti.RegClass>;
  }
}

multiclass VPatConversionVF_WF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "W",
                            fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                            fvti.LMul, fvti.RegClass, fwti.RegClass>;
  }
}

multiclass VPatCompare_VI<string intrinsic, string inst,
                          ImmLeaf ImmType> {
  foreach vti = AllIntegerVectors in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
                              (vti.Scalar ImmType:$rs2),
                              VLOpFrag)),
              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                      GPR:$vl, vti.Log2SEW)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
                                  (vti.Vector vti.RegClass:$rs1),
                                  (vti.Scalar ImmType:$rs2),
                                  (vti.Mask V0),
                                  VLOpFrag)),
              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                          (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}

//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {

//===----------------------------------------------------------------------===//
// Pseudo Instructions for CodeGen
//===----------------------------------------------------------------------===//
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
  def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>;
  def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>;
  def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>;
  def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>;
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
                               [(set GPR:$rd, (riscv_read_vlenb))]>;
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
    Uses = [VL] in
def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
  def PseudoVSPILL_M1 : VPseudo<VS1R_V, V_M1, (outs), (ins VR:$rs1, GPR:$rs2)>;
  def PseudoVSPILL_M2 : VPseudo<VS2R_V, V_M2, (outs), (ins VRM2:$rs1, GPR:$rs2)>;
  def PseudoVSPILL_M4 : VPseudo<VS4R_V, V_M4, (outs), (ins VRM4:$rs1, GPR:$rs2)>;
  def PseudoVSPILL_M8 : VPseudo<VS8R_V, V_M8, (outs), (ins VRM8:$rs1, GPR:$rs2)>;
}

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoVRELOAD_M1 : VPseudo<VL1RE8_V, V_M1, (outs VR:$rs1), (ins GPR:$rs2)>;
  def PseudoVRELOAD_M2 : VPseudo<VL2RE8_V, V_M2, (outs VRM2:$rs1), (ins GPR:$rs2)>;
  def PseudoVRELOAD_M4 : VPseudo<VL4RE8_V, V_M4, (outs VRM4:$rs1), (ins GPR:$rs2)>;
  def PseudoVRELOAD_M8 : VPseudo<VL8RE8_V, V_M8, (outs VRM8:$rs1), (ins GPR:$rs2)>;
}

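// Editorial note (an assumption, not from the source): the Size below,
// 4 * (2*nf - 1) bytes, is consistent with an nf-field segment spill/reload
// expanding to nf whole-register memory ops interleaved with nf-1 address
// increments by $vlenb, each a single 4-byte instruction.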
foreach lmul = MxList in {
  foreach nf = NFSet<lmul>.L in {
    defvar vreg = SegRegClass<lmul, nf>.RC;
    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVSPILL" # nf # "_" # lmul.MX :
        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2, GPR:$vlenb), []>;
    }
    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2, GPR:$vlenb), []>;
    }
  }
}

//===----------------------------------------------------------------------===//
// 6. Configuration-Setting Instructions
//===----------------------------------------------------------------------===//

// Pseudos.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
// when we aren't using one of the special X0 encodings. Otherwise the operand
// could accidentally be made X0 by MachineIR optimizations. To satisfy the
// verifier, we also need a GPRX0 instruction for the special encodings.
def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>;
def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>;
def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>;
}
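// Illustrative only (an editorial sketch, not from the source): once vsetvli
// insertion has run, these pseudos become real configuration instructions,
// e.g.
//   vsetvli  a0, a1, e32, m2, ta, mu   # AVL taken from a1
//   vsetivli a0, 4, e32, m2, ta, mu    # 5-bit immediate AVL
// with PseudoVSETVLIX0 reserved for the special rs1=X0 encodings mentioned
// above.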

//===----------------------------------------------------------------------===//
// 7. Vector Loads and Stores
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 7.4 Vector Unit-Stride Instructions
//===----------------------------------------------------------------------===//

// Pseudo Unit-Stride Loads and Stores
defm PseudoVL : VPseudoUSLoad;
defm PseudoVS : VPseudoUSStore;

defm PseudoVLM : VPseudoLoadMask,
                 Sched<[WriteVLDM, ReadVLDX]>;
defm PseudoVSM : VPseudoStoreMask,
                 Sched<[WriteVSTM, ReadVSTX]>;

//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//

// Vector Strided Loads and Stores
defm PseudoVLS : VPseudoSLoad;
defm PseudoVSS : VPseudoSStore;

//===----------------------------------------------------------------------===//
// 7.6 Vector Indexed Instructions
//===----------------------------------------------------------------------===//

// Vector Indexed Loads and Stores
defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>;
defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>;
defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>;
defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>;

//===----------------------------------------------------------------------===//
// 7.7. Unit-stride Fault-Only-First Loads
//===----------------------------------------------------------------------===//

// vleff may update the VL register
let hasSideEffects = 1, Defs = [VL] in
defm PseudoVL : VPseudoFFLoad;
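// Illustrative (V spec behavior, not restated from this file): if
// vle32ff.v v8, (a0) faults on an element i > 0, no trap is taken; elements
// 0..i-1 are loaded and vl is truncated to i, which is why VL is modeled as
// a def above.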

//===----------------------------------------------------------------------===//
// 7.8. Vector Load/Store Segment Instructions
//===----------------------------------------------------------------------===//
defm PseudoVLSEG : VPseudoUSSegLoad;
defm PseudoVLSSEG : VPseudoSSegLoad;
defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>;
defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>;
defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;

// vlseg<nf>e<eew>ff.v may update the VL register
let hasSideEffects = 1, Defs = [VL] in {
defm PseudoVLSEG : VPseudoUSSegLoadFF;
}

//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 12.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
defm PseudoVADD   : VPseudoVALU_VV_VX_VI;
defm PseudoVSUB   : VPseudoVALU_VV_VX;
defm PseudoVRSUB  : VPseudoVALU_VX_VI;

foreach vti = AllIntegerVectors in {
  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
  // occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
  // to use a more complex splat sequence. Add the pattern for all VTs for
  // consistency.
  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector (undef)),
                                         (vti.Vector vti.RegClass:$rs2),
                                         (vti.Vector vti.RegClass:$rs1),
                                         VLOpFrag)),
            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                              vti.RegClass:$rs2,
                                                              GPR:$vl,
                                                              vti.Log2SEW)>;
  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
                                         (vti.Vector vti.RegClass:$rs2),
                                         (vti.Vector vti.RegClass:$rs1),
                                         VLOpFrag)),
            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_TU")
                                                      vti.RegClass:$merge,
                                                      vti.RegClass:$rs1,
                                                      vti.RegClass:$rs2,
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;
  def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
                                              (vti.Vector vti.RegClass:$rs2),
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Mask V0),
                                              VLOpFrag,
                                              (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
                                                      vti.RegClass:$merge,
                                                      vti.RegClass:$rs1,
                                                      vti.RegClass:$rs2,
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW,
                                                      (XLenVT timm:$policy))>;

  // Match vsub with a small immediate to vadd.vi by negating the immediate.
  def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
                                        (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        VLOpFrag)),
            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                              (NegImm simm5_plus1:$rs2),
                                                              GPR:$vl,
                                                              vti.Log2SEW)>;
  def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar simm5_plus1:$rs2),
                                             (vti.Mask V0),
                                             VLOpFrag,
                                             (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
                                                      vti.RegClass:$merge,
                                                      vti.RegClass:$rs1,
                                                      (NegImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW,
                                                      (XLenVT timm:$policy))>;
}
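
// Worked instances of the folds above (illustrative, not from the source):
// int_riscv_vrsub with two register operands computes rs1 - rs2, so it is
// emitted as vsub.vv with the operands swapped; int_riscv_vsub with
// immediate 3 is emitted as vadd.vi vd, vs1, -3 via NegImm, since there is
// no vsub.vi encoding.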

//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
defm PseudoVWADDU : VPseudoVWALU_VV_VX;
defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
defm PseudoVWADD  : VPseudoVWALU_VV_VX;
defm PseudoVWSUB  : VPseudoVWALU_VV_VX;
defm PseudoVWADDU : VPseudoVWALU_WV_WX;
defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
defm PseudoVWADD  : VPseudoVWALU_WV_WX;
defm PseudoVWSUB  : VPseudoVWALU_WV_WX;

//===----------------------------------------------------------------------===//
// 12.3. Vector Integer Extension
//===----------------------------------------------------------------------===//
defm PseudoVZEXT_VF2 : PseudoVEXT_VF2;
defm PseudoVZEXT_VF4 : PseudoVEXT_VF4;
defm PseudoVZEXT_VF8 : PseudoVEXT_VF8;
defm PseudoVSEXT_VF2 : PseudoVEXT_VF2;
defm PseudoVSEXT_VF4 : PseudoVEXT_VF4;
defm PseudoVSEXT_VF8 : PseudoVEXT_VF8;

//===----------------------------------------------------------------------===//
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
//===----------------------------------------------------------------------===//
defm PseudoVADC  : VPseudoVCALU_VM_XM_IM;
defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd">;
defm PseudoVMADC : VPseudoVCALUM_V_X_I<"@earlyclobber $rd">;

defm PseudoVSBC  : VPseudoVCALU_VM_XM;
defm PseudoVMSBC : VPseudoVCALUM_VM_XM<"@earlyclobber $rd">;
defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd">;

//===----------------------------------------------------------------------===//
// 12.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
defm PseudoVAND : VPseudoVALU_VV_VX_VI;
defm PseudoVOR  : VPseudoVALU_VV_VX_VI;
defm PseudoVXOR : VPseudoVALU_VV_VX_VI;

//===----------------------------------------------------------------------===//
// 12.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
defm PseudoVSLL : VPseudoVSHT_VV_VX_VI<uimm5>;
defm PseudoVSRL : VPseudoVSHT_VV_VX_VI<uimm5>;
defm PseudoVSRA : VPseudoVSHT_VV_VX_VI<uimm5>;

//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI;
defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;

//===----------------------------------------------------------------------===//
// 12.8. Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMSEQ  : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSNE  : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
defm PseudoVMSLT  : VPseudoVCMPM_VV_VX;
defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSLE  : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSGTU : VPseudoVCMPM_VX_VI;
defm PseudoVMSGT  : VPseudoVCMPM_VX_VI;

//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMINU : VPseudoVMINMAX_VV_VX;
defm PseudoVMIN  : VPseudoVMINMAX_VV_VX;
defm PseudoVMAXU : VPseudoVMINMAX_VV_VX;
defm PseudoVMAX  : VPseudoVMINMAX_VV_VX;

//===----------------------------------------------------------------------===//
// 12.10. Vector Single-Width Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMUL    : VPseudoVMUL_VV_VX;
defm PseudoVMULH   : VPseudoVMUL_VV_VX;
defm PseudoVMULHU  : VPseudoVMUL_VV_VX;
defm PseudoVMULHSU : VPseudoVMUL_VV_VX;

//===----------------------------------------------------------------------===//
// 12.11. Vector Integer Divide Instructions
//===----------------------------------------------------------------------===//
defm PseudoVDIVU : VPseudoVDIV_VV_VX;
defm PseudoVDIV  : VPseudoVDIV_VV_VX;
defm PseudoVREMU : VPseudoVDIV_VV_VX;
defm PseudoVREM  : VPseudoVDIV_VV_VX;

//===----------------------------------------------------------------------===//
// 12.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm PseudoVWMUL   : VPseudoVWMUL_VV_VX;
defm PseudoVWMULU  : VPseudoVWMUL_VV_VX;
defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;

//===----------------------------------------------------------------------===//
// 12.13. Vector Single-Width Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMACC  : VPseudoVMAC_VV_VX_AAXA;
defm PseudoVNMSAC : VPseudoVMAC_VV_VX_AAXA;
defm PseudoVMADD  : VPseudoVMAC_VV_VX_AAXA;
defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;

//===----------------------------------------------------------------------===//
// 12.14. Vector Widening Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVWMACCU  : VPseudoVWMAC_VV_VX;
defm PseudoVWMACC   : VPseudoVWMAC_VV_VX;
defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
defm PseudoVWMACCUS : VPseudoVWMAC_VX;

//===----------------------------------------------------------------------===//
// 12.15. Vector Integer Merge Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMERGE : VPseudoVMRG_VM_XM_IM;

//===----------------------------------------------------------------------===//
// 12.16. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;

//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
let Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI;
  defm PseudoVSADD  : VPseudoVSALU_VV_VX_VI;
  defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
  defm PseudoVSSUB  : VPseudoVSALU_VV_VX;
}

//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Averaging Add and Subtract
//===----------------------------------------------------------------------===//
let Uses = [VXRM], hasSideEffects = 1 in {
  defm PseudoVAADDU : VPseudoVAALU_VV_VX;
  defm PseudoVAADD  : VPseudoVAALU_VV_VX;
  defm PseudoVASUBU : VPseudoVAALU_VV_VX;
  defm PseudoVASUB  : VPseudoVAALU_VV_VX;
}

//===----------------------------------------------------------------------===//
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVSMUL : VPseudoVSMUL_VV_VX;
}

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
let Uses = [VXRM], hasSideEffects = 1 in {
  defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI<uimm5>;
  defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI<uimm5>;
}

//===----------------------------------------------------------------------===//
// 13.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVNCLIP  : VPseudoVNCLP_WV_WX_WI;
  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI;
}

} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFADD  : VPseudoVALU_VV_VF;
defm PseudoVFSUB  : VPseudoVALU_VV_VF;
defm PseudoVFRSUB : VPseudoVALU_VF;
}

//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFWADD : VPseudoVFWALU_VV_VF;
defm PseudoVFWSUB : VPseudoVFWALU_VV_VF;
defm PseudoVFWADD : VPseudoVFWALU_WV_WF;
defm PseudoVFWSUB : VPseudoVFWALU_WV_WF;
}

//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFMUL  : VPseudoVFMUL_VV_VF;
defm PseudoVFDIV  : VPseudoVFDIV_VV_VF;
defm PseudoVFRDIV : VPseudoVFRDIV_VF;
}

//===----------------------------------------------------------------------===//
// 14.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFWMUL : VPseudoVWMUL_VV_VF;
}

//===----------------------------------------------------------------------===//
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFMACC  : VPseudoVMAC_VV_VF_AAXA;
defm PseudoVFNMACC : VPseudoVMAC_VV_VF_AAXA;
defm PseudoVFMSAC  : VPseudoVMAC_VV_VF_AAXA;
defm PseudoVFNMSAC : VPseudoVMAC_VV_VF_AAXA;
defm PseudoVFMADD  : VPseudoVMAC_VV_VF_AAXA;
defm PseudoVFNMADD : VPseudoVMAC_VV_VF_AAXA;
defm PseudoVFMSUB  : VPseudoVMAC_VV_VF_AAXA;
defm PseudoVFNMSUB : VPseudoVMAC_VV_VF_AAXA;
}

//===----------------------------------------------------------------------===//
// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFWMACC  : VPseudoVWMAC_VV_VF;
defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF;
defm PseudoVFWMSAC  : VPseudoVWMAC_VV_VF;
defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF;
}

//===----------------------------------------------------------------------===//
// 14.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in
defm PseudoVFSQRT : VPseudoVSQR_V;

//===----------------------------------------------------------------------===//
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in
defm PseudoVFRSQRT7 : VPseudoVRCP_V;

//===----------------------------------------------------------------------===//
// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in
defm PseudoVFREC7 : VPseudoVRCP_V;

//===----------------------------------------------------------------------===//
// 14.11. Vector Floating-Point Min/Max Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
defm PseudoVFMIN : VPseudoVMAX_VV_VF;
defm PseudoVFMAX : VPseudoVMAX_VV_VF;
}

//===----------------------------------------------------------------------===//
// 14.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFSGNJ  : VPseudoVSGNJ_VV_VF;
defm PseudoVFSGNJN : VPseudoVSGNJ_VV_VF;
defm PseudoVFSGNJX : VPseudoVSGNJ_VV_VF;

//===----------------------------------------------------------------------===//
// 14.13. Vector Floating-Point Compare Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
defm PseudoVMFEQ : VPseudoVCMPM_VV_VF;
defm PseudoVMFNE : VPseudoVCMPM_VV_VF;
defm PseudoVMFLT : VPseudoVCMPM_VV_VF;
defm PseudoVMFLE : VPseudoVCMPM_VV_VF;
defm PseudoVMFGT : VPseudoVCMPM_VF;
defm PseudoVMFGE : VPseudoVCMPM_VF;
}

//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFCLASS : VPseudoVCLS_V;

//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMERGE : VPseudoVMRG_FM;

//===----------------------------------------------------------------------===//
// 14.16. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMV_V : VPseudoVMV_F;

//===----------------------------------------------------------------------===//
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFCVT_XU_F     : VPseudoVCVTI_V;
defm PseudoVFCVT_X_F      : VPseudoVCVTI_V;
defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
defm PseudoVFCVT_RTZ_X_F  : VPseudoVCVTI_V;
defm PseudoVFCVT_F_XU     : VPseudoVCVTF_V;
defm PseudoVFCVT_F_X      : VPseudoVCVTF_V;

//===----------------------------------------------------------------------===//
// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFWCVT_XU_F     : VPseudoVWCVTI_V;
defm PseudoVFWCVT_X_F      : VPseudoVWCVTI_V;
defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
defm PseudoVFWCVT_RTZ_X_F  : VPseudoVWCVTI_V;
defm PseudoVFWCVT_F_XU     : VPseudoVWCVTF_V;
defm PseudoVFWCVT_F_X      : VPseudoVWCVTF_V;
defm PseudoVFWCVT_F_F      : VPseudoVWCVTD_V;

//===----------------------------------------------------------------------===//
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFNCVT_XU_F     : VPseudoVNCVTI_W;
defm PseudoVFNCVT_X_F      : VPseudoVNCVTI_W;
defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
defm PseudoVFNCVT_RTZ_X_F  : VPseudoVNCVTI_W;
defm PseudoVFNCVT_F_XU     : VPseudoVNCVTF_W;
defm PseudoVFNCVT_F_X      : VPseudoVNCVTF_W;
defm PseudoVFNCVT_F_F      : VPseudoVNCVTD_W;
defm PseudoVFNCVT_ROD_F_F  : VPseudoVNCVTD_W;
} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 15.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm PseudoVREDSUM  : VPseudoVRED_VS;
defm PseudoVREDAND  : VPseudoVRED_VS;
defm PseudoVREDOR   : VPseudoVRED_VS;
defm PseudoVREDXOR  : VPseudoVRED_VS;
defm PseudoVREDMINU : VPseudoVRED_VS;
defm PseudoVREDMIN  : VPseudoVRED_VS;
defm PseudoVREDMAXU : VPseudoVRED_VS;
defm PseudoVREDMAX  : VPseudoVRED_VS;

//===----------------------------------------------------------------------===//
// 15.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
let IsRVVWideningReduction = 1 in {
defm PseudoVWREDSUMU : VPseudoVWRED_VS;
defm PseudoVWREDSUM  : VPseudoVWRED_VS;
}
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFREDOSUM : VPseudoVFREDO_VS;
defm PseudoVFREDUSUM : VPseudoVFRED_VS;
}
let mayRaiseFPException = true in {
defm PseudoVFREDMIN : VPseudoVFRED_VS;
defm PseudoVFREDMAX : VPseudoVFRED_VS;
}

//===----------------------------------------------------------------------===//
// 15.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
let IsRVVWideningReduction = 1,
    Uses = [FRM],
    mayRaiseFPException = true in {
defm PseudoVFWREDUSUM : VPseudoVFWRED_VS;
defm PseudoVFWREDOSUM : VPseudoVFWRED_VS;
}

} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 16. Vector Mask Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 16.1 Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//

defm PseudoVMAND  : VPseudoVALU_MM;
defm PseudoVMNAND : VPseudoVALU_MM;
defm PseudoVMANDN : VPseudoVALU_MM;
defm PseudoVMXOR  : VPseudoVALU_MM;
defm PseudoVMOR   : VPseudoVALU_MM;
defm PseudoVMNOR  : VPseudoVALU_MM;
defm PseudoVMORN  : VPseudoVALU_MM;
defm PseudoVMXNOR : VPseudoVALU_MM;

// Pseudo instructions: vmclr.m and vmset.m are modeled in terms of vmxor.mm
// and vmxnor.mm respectively.
defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">,
                   Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">,
                   Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
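// Illustrative expansions (not spelled out in the source): with a single
// register used for both sources and the destination these reduce to
//   vmclr.m v0  ==>  vmxor.mm  v0, v0, v0   # all bits cleared
//   vmset.m v0  ==>  vmxnor.mm v0, v0, v0   # all bits set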

//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vcpop
//===----------------------------------------------------------------------===//

defm PseudoVCPOP : VPseudoVPOP_M;

//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//

defm PseudoVFIRST : VPseudoV1ST_M;

//===----------------------------------------------------------------------===//
// 16.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSBF : VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 16.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSIF : VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 16.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSOF : VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 16.8. Vector Iota Instruction
//===----------------------------------------------------------------------===//
defm PseudoVIOTA_M : VPseudoVIOT_M;

//===----------------------------------------------------------------------===//
// 16.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
defm PseudoVID : VPseudoVID_V;

//===----------------------------------------------------------------------===//
// 17. Vector Permutation Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 17.1. Integer Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  foreach m = MxList in {
    let VLMul = m.value in {
      let HasSEWOp = 1, BaseInstr = VMV_X_S in
      def PseudoVMV_X_S # "_" # m.MX:
        Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
        Sched<[WriteVIMovVX, ReadVIMovVX]>,
        RISCVVPseudo;
      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
          Constraints = "$rd = $rs1" in
      def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                             (ins m.vrclass:$rs1, GPR:$rs2,
                                                  AVL:$vl, ixlenimm:$sew),
                                             []>,
        Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>,
        RISCVVPseudo;
    }
  }
}
} // Predicates = [HasVInstructions]

//===----------------------------------------------------------------------===//
// 17.2. Floating-Point Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructionsAnyF] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  foreach f = FPList in {
    foreach m = f.MxList in {
      let VLMul = m.value in {
        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
        def "PseudoVFMV_" # f.FX # "_S_" # m.MX :
          Pseudo<(outs f.fprclass:$rd),
                 (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
          Sched<[WriteVFMovVF, ReadVFMovVF]>,
          RISCVVPseudo;
        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
            Constraints = "$rd = $rs1" in
        def "PseudoVFMV_S_" # f.FX # "_" # m.MX :
                                          Pseudo<(outs m.vrclass:$rd),
                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
                                                      AVL:$vl, ixlenimm:$sew),
                                                 []>,
          Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>,
          RISCVVPseudo;
      }
    }
  }
}
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 17.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  defm PseudoVSLIDEUP    : VPseudoVSLD_VX_VI<uimm5, "@earlyclobber $rd">;
  defm PseudoVSLIDEDOWN  : VPseudoVSLD_VX_VI<uimm5>;
  defm PseudoVSLIDE1UP   : VPseudoVSLD1_VX<"@earlyclobber $rd">;
  defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
  defm PseudoVFSLIDE1UP   : VPseudoVSLD1_VF<"@earlyclobber $rd">;
  defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 17.4. Vector Register Gather Instructions
//===----------------------------------------------------------------------===//
defm PseudoVRGATHER     : VPseudoVGTR_VV_VX_VI<uimm5, "@earlyclobber $rd">;
defm PseudoVRGATHEREI16 : VPseudoVGTR_VV_EEW</* eew */ 16, "@earlyclobber $rd">;

//===----------------------------------------------------------------------===//
// 17.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
defm PseudoVCOMPRESS : VPseudoVCPR_V;

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 12.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.3. Vector Integer Extension
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
                     AllFractionableVF2IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
                     AllFractionableVF4IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
                     AllFractionableVF8IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
                     AllFractionableVF2IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
                     AllFractionableVF4IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
                     AllFractionableVF8IntVectors>;

//===----------------------------------------------------------------------===//
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;

defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;

//===----------------------------------------------------------------------===//
// 12.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
                            uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef),
                                        (vti.Vector vti.RegClass:$rs1),
                                        (XLenVT 1), VLOpFrag)),
            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                              vti.RegClass:$rs1,
                                                              GPR:$vl,
                                                              vti.Log2SEW)>;
  def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (XLenVT 1),
                                             (vti.Mask V0),
                                             VLOpFrag,
                                             (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
                                                        vti.RegClass:$merge,
                                                        vti.RegClass:$rs1,
                                                        vti.RegClass:$rs1,
                                                        (vti.Mask V0),
                                                        GPR:$vl,
                                                        vti.Log2SEW,
                                                        (XLenVT timm:$policy))>;
}
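// For example (illustrative): a shift by one such as vsll.vi v8, v8, 1 is
// emitted as vadd.vv v8, v8, v8, since x << 1 == x + x.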

//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.8. Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;

defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;

// Match vmsgt(u) with 2 vector operands to vmslt(u) with the operands
// swapped; likewise match vmsge(u) to vmsle(u) with the operands swapped.
defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;

defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
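
// For illustration (an editor's sketch; register numbers are hypothetical):
// RVV has no vv encoding for vmsgt(u) and no vmsge(u) encoding at all, so a
// vector-vector
//   vmsgt(va, vb)          // va > vb ?
// is selected with the operands swapped as
//   vmslt.vv v0, vb, va    // vb < va  is the same predicate
// and vmsge(va, vb) likewise becomes vmsle.vv v0, vb, va.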

// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16 and
// non-zero. Zero can be .vx with x0. This avoids the user needing to know that
// there is no vmslt(u).vi instruction. Similar for vmsge(u).vx intrinsics
// using vmsgt(u).vi.
defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE", simm5_plus1_nonzero>;
defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
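
// For illustration (an editor's sketch; operands are hypothetical): "x < 5",
// requested as vmslt.vx with the scalar constant 5, is selected with the
// immediate decremented by the DecImm transform as
//   vmsle.vi v0, v8, 4     // x <= 4  is exactly  x < 5
// since vmslt has no .vi form.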

// vmsge(u) has no .vx form at all, so zero cannot be handled as a compare
// against x0 the way it is for vmslt(u); it must be matched here as well,
// hence simm5_plus1 rather than simm5_plus1_nonzero for the signed variant.
defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT", simm5_plus1>;
defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
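
// For illustration (an editor's sketch; operands are hypothetical): a signed
// "x >= 0", requested as vmsge.vx with the constant 0, is matched here and
// selected as
//   vmsgt.vi v0, v8, -1    // x > -1  is exactly  x >= 0  for signed x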

//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.10. Vector Single-Width Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.11. Vector Integer Divide Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.13. Vector Single-Width Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.14. Vector Widening Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.15. Vector Integer Merge Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;

//===----------------------------------------------------------------------===//
// 12.16. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
foreach vti = AllVectors in {
  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector undef),
                                           (vti.Vector vti.RegClass:$rs1),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
             $rs1, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
                                           (vti.Vector vti.RegClass:$rs1),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX#"_TU")
             $passthru, $rs1, GPR:$vl, vti.Log2SEW)>;

  // vmv.v.x/vmv.v.i are handled in RISCVInstrInfoVVLPatterns.td
}
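
// For illustration (an editor's sketch of the two cases above, at LMUL=1):
//   vmv_v_v(undef,    vs1, vl)  ->  PseudoVMV_V_V_M1    vs1, vl
//   vmv_v_v(passthru, vs1, vl)  ->  PseudoVMV_V_V_M1_TU passthru, vs1, vl
// i.e. the _TU (tail-undisturbed) variant is chosen exactly when a real
// passthru operand requires the elements past vl to be preserved.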

//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Averaging Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
                            uimm5>;

//===----------------------------------------------------------------------===//
// 13.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;

} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>;
defm : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryW_VV_VX<"int_riscv_vfwmacc", "PseudoVFWMACC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmacc", "PseudoVFWNMACC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwmsac", "PseudoVFWMSAC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.11. Vector Floating-Point Min/Max Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.13. Vector Floating-Point Compare Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;

//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
// We can use vmerge.vvm to support vector-vector vfmerge.
// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
// int_riscv_vmerge. Support both for compatibility.
defm : VPatBinaryV_VM_TAIL<"int_riscv_vmerge", "PseudoVMERGE",
                           /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
defm : VPatBinaryV_VM_TAIL<"int_riscv_vfmerge", "PseudoVMERGE",
                           /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
defm : VPatBinaryV_XM_TAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
                           /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;

foreach fvti = AllFloatVectors in {
  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector undef),
                                            (fvti.Vector fvti.RegClass:$rs2),
                                            (fvti.Scalar (fpimm0)),
                                            (fvti.Mask V0), VLOpFrag)),
            (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
  defvar instr_tu = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU");
  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
                                            (fvti.Vector fvti.RegClass:$rs2),
                                            (fvti.Scalar (fpimm0)),
                                            (fvti.Mask V0), VLOpFrag)),
            (instr_tu fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
                      (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}
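
// For illustration (an editor's sketch; register numbers are hypothetical):
// merging in the scalar +0.0 needs no FP register, because +0.0 has the same
// all-zeros bit pattern as the integer immediate 0; so
//   vfmerge(undef, v8, +0.0, v0, vl)
// is selected as the integer merge with immediate zero,
//   vmerge.vim v9, v8, 0, v0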

//===----------------------------------------------------------------------===//
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm : VPatConversionVI_VF<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
defm : VPatConversionVI_VF<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">;
defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">;

//===----------------------------------------------------------------------===//
// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;

//===----------------------------------------------------------------------===//
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm : VPatConversionVI_WF<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
defm : VPatConversionVI_WF<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
defm : VPatConversionVF_WI<"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
defm : VPatConversionVF_WI<"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 15.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;

//===----------------------------------------------------------------------===//
// 15.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredusum", "PseudoVFREDUSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;

//===----------------------------------------------------------------------===//
// 15.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionW_VS<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", /*IsFloat=*/1>;
defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;

} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 16. Vector Mask Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 16.1. Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;

// pseudo instructions
defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;

//===----------------------------------------------------------------------===//
// 16.2. Vector count population in mask vcpop.m
//===----------------------------------------------------------------------===//
defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;

//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;

//===----------------------------------------------------------------------===//
// 16.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;

//===----------------------------------------------------------------------===//
// 16.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;

//===----------------------------------------------------------------------===//
// 16.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;

//===----------------------------------------------------------------------===//
// 16.8. Vector Iota Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;

//===----------------------------------------------------------------------===//
// 16.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;

} // Predicates = [HasVInstructions]

//===----------------------------------------------------------------------===//
// 17. Vector Permutation Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 17.1. Integer Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
}
} // Predicates = [HasVInstructions]

//===----------------------------------------------------------------------===//
// 17.2. Floating-Point Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructionsAnyF] in {
foreach fvti = AllFloatVectors in {
  defvar instr = !cast<Instruction>("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
                                    fvti.LMul.MX);
  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
                         (instr $rs2, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                                fvti.LMul.MX)
             (fvti.Vector $rs1),
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar (fpimm0)), VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
             (fvti.Vector $rs1), X0, GPR:$vl, fvti.Log2SEW)>;
}
} // Predicates = [HasVInstructionsAnyF]
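
// For illustration (an editor's sketch): since +0.0 is the all-zeros bit
// pattern, writing it into element 0 with vfmv.s.f needs no FP register and
// is selected above as the integer scalar move from the zero register,
//   vmv.s.x vd, x0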

//===----------------------------------------------------------------------===//
// 17.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 17.4. Vector Register Gather Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllIntegerVectors>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 17.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]

// Include the non-intrinsic ISel patterns
include "RISCVInstrInfoVVLPatterns.td"
include "RISCVInstrInfoVSDPatterns.td"