xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (revision 5956d97f4b3204318ceb6aa9c77bd0bc6ea87a41)
1//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file contains the required infrastructure to support code generation
10/// for the standard 'V' (Vector) extension, version 0.10.  This version is still
11/// experimental as the 'V' extension hasn't been ratified yet.
12///
13/// This file is included from RISCVInstrInfoV.td
14///
15//===----------------------------------------------------------------------===//
16
// SelectionDAG node for RISCVISD::VMV_X_S (named for the vmv.x.s
// instruction): one integer-vector operand, one scalar integer result.
def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
// SelectionDAG node for RISCVISD::READ_VLENB: no operands, produces the
// VLENB value (vector register length in bytes) as an XLenVT scalar.
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;
22
// Operand that is allowed to be a register or a 5 bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
def AVL : RegisterOperand<GPRNoX0> {
  let OperandNamespace = "RISCVOp";
  let OperandType = "OPERAND_AVL";
}

// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
// ComplexPattern hook (selectVLOp in ISel) that matches the AVL operand.
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;

// Transform an immediate into (imm - 1), keeping its value type. Used where
// the instruction encodes one less than the pattern's constant.
def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Encodings for the tail-policy operand carried by some pseudos.
defvar TAIL_UNDISTURBED = 0;
defvar TAIL_AGNOSTIC = 1;
45
//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//

// This class describes information associated to the LMUL.
class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;     // Register class for this LMUL.
  VReg wvrclass = wregclass;   // Register class for 2x this LMUL (widening).
  VReg f8vrclass = f8regclass; // Register class for 1/8 of this LMUL.
  VReg f4vrclass = f4regclass; // Register class for 1/4 of this LMUL.
  VReg f2vrclass = f2regclass; // Register class for 1/2 of this LMUL.
  string MX = mx;              // Name suffix ("M1", "MF2", ...) for pseudos.
  int octuple = oct;           // LMUL * 8, as an integer usable in arithmetic.
}

// Associate LMUL with tablegen records of register classes. Slots for
// fractional/widened classes that do not exist fall back to VR and are
// marked /*NoVReg*/.
def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;

// Used to iterate over all possible LMULs.
defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
// For floating point, which doesn't need MF8 (no 8-bit FP element type).
defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for widening and narrowing instructions as it doesn't contain M8.
defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
// For floating point, which doesn't need MF8.
defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];

// Used for zext/sext.vf2 (source is LMUL/2, so MF8 has no source class).
defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for zext/sext.vf4.
defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for zext/sext.vf8.
defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];
91
// The set of LMULs that are legal for a given element width: wider elements
// exclude the smallest fractional LMULs.
class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}

// Bundles a scalar FP register class with its pseudo-name suffix and the
// LMULs legal for that element width.
class FPR_Info<RegisterClass regclass, string fx, list<LMULInfo> mxlist> {
  RegisterClass fprclass = regclass;
  string FX = fx;
  list<LMULInfo> MxList = mxlist;
}

def SCALAR_F16 : FPR_Info<FPR16, "F16", MxSet<16>.m>;
def SCALAR_F32 : FPR_Info<FPR32, "F32", MxSet<32>.m>;
def SCALAR_F64 : FPR_Info<FPR64, "F64", MxSet<64>.m>;

// All supported scalar FP types.
defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];

// Used for widening instructions. It excludes F64.
defvar FPListW = [SCALAR_F16, SCALAR_F32];
113
// Legal NF (number of segment fields) values for a given LMUL; the lists
// keep NF * LMUL within the 8-register limit (hence none for M8).
class NFSet<LMULInfo m> {
  list<int> L = !cond(!eq(m.value, V_M8.value): [],
                      !eq(m.value, V_M4.value): [2],
                      !eq(m.value, V_M2.value): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}

// Compile-time integer log2; num is expected to be a power of two (recurses
// by shifting right until reaching 1).
class log2<int num> {
  int val = !if(!eq(num, 1), 0, !add(1, log2<!srl(num, 1)>.val));
}
124
// Map an octuple-of-LMUL value (LMUL * 8, see LMULInfo.octuple) back to its
// "Mx" name suffix; yields "NoDef" for values with no corresponding LMUL.
class octuple_to_str<int octuple> {
  string ret = !cond(!eq(octuple, 1) : "MF8",
                     !eq(octuple, 2) : "MF4",
                     !eq(octuple, 4) : "MF2",
                     !eq(octuple, 8) : "M1",
                     !eq(octuple, 16) : "M2",
                     !eq(octuple, 32) : "M4",
                     !eq(octuple, 64) : "M8",
                     true : "NoDef");
}
135
// Pattern fragment matching an AVL operand through the VLOp ComplexPattern.
def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;

// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
// We can't use the X0 register because the AVL operands use GPRNoX0.
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;

// List of EEW.
defvar EEWList = [8, 16, 32, 64];

// Segment (VRN<nf>...) register class for an nf-field tuple at LMUL m;
// fractional LMULs map onto the M1 tuple classes.
class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}
152
//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//

// Describes one legal vector value type: vector and mask MVTs, element width,
// register class, LMUL, and the matching scalar type / scalar register class.
class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
{
  ValueType Vector = Vec;
  ValueType Mask = Mas;
  int SEW = Sew;
  int Log2SEW = log2<Sew>.val;
  VReg RegClass = Reg;
  LMULInfo LMul = M;
  ValueType Scalar = Scal;
  RegisterClass ScalarRegClass = ScalarReg;
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  // Suffix selecting the scalar-operand form of a pseudo: "X" for GPR
  // scalars, "F16"/"F32"/"F64" for FP scalars.
  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "F16",
                              !eq(Scal, f32) : "F32",
                              !eq(Scal, f64) : "F64");
}

// VTypeInfo for a register group (LMUL > 1) which additionally records the
// corresponding LMUL=1 vector type in VectorM1.
class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
{
  ValueType VectorM1 = VecM1;
}
185
// Every legal vector type, organized into nested defsets so multiclasses can
// be instantiated over integer/FP and grouped/fractional/non-grouped subsets.
defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
        def VI8MF8: VTypeInfo<vint8mf8_t,  vbool64_t,  8, VR, V_MF8>;
        def VI8MF4: VTypeInfo<vint8mf4_t,  vbool32_t,  8, VR, V_MF4>;
        def VI8MF2: VTypeInfo<vint8mf2_t,  vbool16_t,  8, VR, V_MF2>;
        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
      }
      def VI8M1: VTypeInfo<vint8m1_t,   vbool8_t,   8, VR, V_M1>;
      def VI16M1: VTypeInfo<vint16m1_t,  vbool16_t, 16, VR, V_M1>;
      def VI32M1: VTypeInfo<vint32m1_t,  vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t,  vbool64_t, 64, VR, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      defset list<VTypeInfo> FractionalGroupFloatVectors = {
        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
        def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      }
      def VF16M1:  VTypeInfo<vfloat16m1_t,  vbool16_t, 16, VR, V_M1,  f16, FPR16>;
      def VF32M1:  VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1,  f32, FPR32>;
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                 VRM2, V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                 VRM4, V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                 VRM8, V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                 VRM2, V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
                                 VRM4, V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
                                 VRM8, V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                 VRM2, V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                 VRM4, V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
                                 VRM8, V_M8, f64, FPR64>;
    }
  }
}
257
// This functor is used to obtain the int vector type that has the same SEW
// and multiplier as the input parameter type. Relies on the VI*/VF* record
// naming convention above.
class GetIntVTypeInfo<VTypeInfo vti>
{
  // Equivalent integer vector type. E.g.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}

// Type information for a mask (vbool) type.
class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // We assume SEW=1 and set corresponding LMUL. vsetvli insertion will
  // look for SEW=1 to optimize based on surrounding instructions.
  int SEW = 1;
  int Log2SEW = 0;
  LMULInfo LMul = M;
  string BX = Bx; // Appendix of mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}

defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}

// Pairs a vector type with its widened counterpart (double SEW).
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

// Pairs a vector type with a narrower-element fraction of itself.
class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}
305
// (SEW, LMUL) -> (2*SEW, 2*LMUL) integer pairs; sources stop at M4 since the
// widened result may not exceed M8.
defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}

// FP equivalent of the above: f16->f32 and f32->f64 widening pairs.
defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}
338
// Pairs for *.vf2 narrowing/extension: Fti has half the SEW and half the
// LMUL of Vti.
defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}

// Pairs for *.vf4: Fti has a quarter of Vti's SEW and LMUL.
defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}

// Pairs for *.vf8: Fti has an eighth of Vti's SEW and LMUL.
defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}

// Integer-to-FP widening pairs: the FP type has double the int type's SEW.
defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}
395
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVBaseInfo.h.
class CONST8b<bits<8> val> {
  bits<8> V = val;
}
// Sentinel value; must match the C++ side's notion of an invalid index.
def InvalidIndex : CONST8b<0x80>;
// Mixin marking a record as a vector pseudo; keys the searchable table below.
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  Instruction BaseInstr;
}

// The actual table, queried via getPseudoInfo(Pseudo).
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
  let PrimaryKeyEarlyOut = true;
}

// Per-intrinsic metadata, queried via getRISCVVIntrinsicInfo(IntrinsicID).
def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "SplatOperand", "VLOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
426
// Lookup key mixin for unit-stride/strided vector load pseudos.
class RISCVVLE<bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<1> Masked = M;    // Has a mask operand.
  bits<1> IsTU = TU;     // Has a tail-undisturbed merge operand.
  bits<1> Strided = Str; // Strided rather than unit-stride.
  bits<1> FF = F;        // Fault-only-first form.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Queried via getVLEPseudo to select a load pseudo from its properties.
def RISCVVLETable : GenericTable {
  let FilterClass = "RISCVVLE";
  let CppTypeName = "VLEPseudo";
  let Fields = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLEPseudo";
}

// Lookup key mixin for unit-stride/strided vector store pseudos (stores have
// no merge operand, so no IsTU).
class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Queried via getVSEPseudo.
def RISCVVSETable : GenericTable {
  let FilterClass = "RISCVVSE";
  let CppTypeName = "VSEPseudo";
  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSEPseudo";
}

// Shared key mixin for indexed loads and stores; IndexLMUL is the LMUL of
// the index operand, which may differ from the data LMUL.
class RISCVVLX_VSX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

class RISCVVLX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, TU, O, S, L, IL>;
// Stores never have a merge operand, so TU is fixed to 0.
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, /*TU*/0, O, S, L, IL>;

// Common table layout for the two indexed-access tables below.
class RISCVVLX_VSXTable : GenericTable {
  let CppTypeName = "VLX_VSXPseudo";
  let Fields = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}

def RISCVVLXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVLX";
  let PrimaryKeyName = "getVLXPseudo";
}

def RISCVVSXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVSX";
  let PrimaryKeyName = "getVSXPseudo";
}
491
// Lookup key mixin for segment load pseudos; NF is the number of fields.
class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Queried via getVLSEGPseudo.
def RISCVVLSEGTable : GenericTable {
  let FilterClass = "RISCVVLSEG";
  let CppTypeName = "VLSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLSEGPseudo";
}

// Lookup key mixin for indexed segment load pseudos.
class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Queried via getVLXSEGPseudo.
def RISCVVLXSEGTable : GenericTable {
  let FilterClass = "RISCVVLXSEG";
  let CppTypeName = "VLXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVLXSEGPseudo";
}

// Lookup key mixin for segment store pseudos.
class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Queried via getVSSEGPseudo.
def RISCVVSSEGTable : GenericTable {
  let FilterClass = "RISCVVSSEG";
  let CppTypeName = "VSSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSSEGPseudo";
}

// Lookup key mixin for indexed segment store pseudos.
class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Queried via getVSXSEGPseudo.
def RISCVVSXSEGTable : GenericTable {
  let FilterClass = "RISCVVSXSEG";
  let CppTypeName = "VSXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVSXSEGPseudo";
}
562
//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//

// Recover the base instruction name from a pseudo name by stripping the
// decorations added by the pseudo multiclasses. The nested !subst calls are
// evaluated innermost-first, so the order is significant: e.g. "_B16" is
// removed before "_B1" is attempted (otherwise a stray "6" would remain),
// and "Pseudo" is stripped before anything else.
class PseudoToVInst<string PseudoInst> {
  string VInst = !subst("_M8", "",
                 !subst("_M4", "",
                 !subst("_M2", "",
                 !subst("_M1", "",
                 !subst("_MF2", "",
                 !subst("_MF4", "",
                 !subst("_MF8", "",
                 !subst("_B1", "",
                 !subst("_B2", "",
                 !subst("_B4", "",
                 !subst("_B8", "",
                 !subst("_B16", "",
                 !subst("_B32", "",
                 !subst("_B64", "",
                 !subst("_MASK", "",
                 !subst("_TIED", "",
                 !subst("_TU", "",
                 !subst("F16", "F",
                 !subst("F32", "F",
                 !subst("F64", "F",
                 !subst("Pseudo", "", PseudoInst)))))))))))))))))))));
}
590
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
class GetVRegNoV0<VReg VRegClass> {
  // Map a register class to its V0-excluding variant; classes with no such
  // variant are returned unchanged.
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
                 true : VRegClass);
}

// Join strings in list using separator and ignoring empty elements
class Join<list<string> strings, string separator> {
  string ret = !foldl(!head(strings), !tail(strings), a, b,
                      !cond(
                        !and(!empty(a), !empty(b)) : "",
                        !empty(a) : b,
                        !empty(b) : a,
                        1 : a#separator#b));
}
623
// Generic vector pseudo: records the base instruction and the LMUL.
class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
}

// Unmasked unit-stride load (fault-only-first when isFF); no merge operand,
// i.e. the IsTU key bit is 0.
class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
643
// Tail-undisturbed variant of the unmasked unit-stride load: carries a merge
// operand ($dest) tied to the destination register.
class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW, bit isFF> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Masked unit-stride load: merge operand tied to $rd, mask in $vm, plus a
// trailing policy immediate. The destination class excludes v0 since it
// holds the mask.
class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
677
// Unmasked strided load; $rs2 holds the byte stride.
class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Tail-undisturbed variant of the strided load: merge operand tied to $rd.
class VPseudoSLoadNoMaskTU<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Masked strided load: merge tied to $rd, mask in $vm, trailing policy
// operand; destination class excludes v0.
class VPseudoSLoadMask<VReg RetClass, int EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, GPR:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
725
// Unmasked indexed load; $rs2 holds the index vector (LMUL given by LMUL
// parameter). EarlyClobber selects an @earlyclobber $rd constraint.
class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                         bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl,
              ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, /*TU*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd", "");
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Tail-undisturbed variant of the indexed load: merge operand tied to $rd,
// combined with the optional earlyclobber constraint.
class VPseudoILoadNoMaskTU<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                           bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPR:$rs1, IdxClass:$rs2, AVL:$vl,
              ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Masked indexed load: merge tied to $rd, mask in $vm, trailing policy
// operand; destination class excludes v0.
class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                       bit Ordered, bit EarlyClobber>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, IdxClass:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/1, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
779
780class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
781      Pseudo<(outs),
782              (ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
783      RISCVVPseudo,
784      RISCVVSE</*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
785  let mayLoad = 0;
786  let mayStore = 1;
787  let hasSideEffects = 0;
788  let HasVLOp = 1;
789  let HasSEWOp = 1;
790  let HasDummyMask = 1;
791  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
792}
793
// Unit-stride store, masked by $vm: only active elements are written.
class VPseudoUSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
806
// Strided store, unmasked: base address in $rs1, byte stride in $rs2.
class VPseudoSStoreNoMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
820
// Strided store, masked by $vm: base address in $rs1, byte stride in $rs2.
class VPseudoSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
833
// Unary instruction that is never masked so HasDummyMask=0.
// Single source in $rs1 (Op2Class may be a vector or scalar operand class).
class VPseudoUnaryNoDummyMask<VReg RetClass,
                              DAGOperand Op2Class> :
        Pseudo<(outs RetClass:$rd),
               (ins Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
847
// Nullary (no vector source) operation, unmasked; only VL/SEW inputs.
// Used e.g. for vid.v (see VPseudoVID_V below).
class VPseudoNullaryNoMask<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins AVL:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
860
// Nullary (no vector source) operation, masked: $merge is tied to $rd and
// supplies the values of masked-off elements.
class VPseudoNullaryMask<VReg RegClass>:
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
              ixlenimm:$sew), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  // Spacing normalized to match every other Constraints assignment in this
  // file (was "let Constraints ="...").
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
874
// Nullary for pseudo instructions. They are expanded in
// RISCVExpandPseudoInsts pass.
// Whole-mask-register producers (result in VR), e.g. VMSET/VMCLR variants.
class VPseudoNullaryPseudoM<string BaseInst>
       : Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
       RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
  // Just fill a corresponding real v-inst to pass tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
}
889
// RetClass could be GPR or VReg.
// Unary operation, unmasked; single vector source in $rs2.
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
904
// Unary operation, masked: $merge is tied to $rd (no policy operand, so
// tail/mask-undisturbed semantics come from the merge value).
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  // Caller-supplied constraint (e.g. @earlyclobber) is joined with the tie.
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
919
// Masked unary operation with a trailing $policy immediate selecting the
// tail/mask policy; otherwise identical in shape to VPseudoUnaryMask.
class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
935
// mask unary operation without maskedoff
// Masked mask-to-scalar operation: reads mask register $rs1 under mask $vm
// and produces a scalar GPR result (used by vpopc/vfirst style pseudos).
class VPseudoMaskUnarySOutMask:
        Pseudo<(outs GPR:$rd),
               (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
948
// Mask can be V0~V31
// Unary operation whose mask may live in any vector register (plain VR:$vm
// rather than VMaskOp), e.g. vcompress (see VPseudoVCPR_V below).  $merge is
// tied to $rd and the result must not overlap the sources (@earlyclobber).
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2,
                  VR:$vm, AVL:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
967
// Binary operation, unmasked: vector source in $rs2, second source (vector,
// scalar or immediate, per Op2Class) in $rs1.
class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
984
// Special version of VPseudoBinaryNoMask where we pretend the first source is
// tied to the destination.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryNoMask<VReg RetClass,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs2"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  // The tie is an artificial constraint, so the tail is agnostic and the
  // register allocator may untie via three-address conversion.
  let ForceTailAgnostic = 1;
  let isConvertibleToThreeAddress = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1005
// Indexed (scatter) store, unmasked: data in $rd, base in $rs1, index vector
// in $rs2.  LMUL here is the EMUL of the index operand.
class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1020
// Indexed (scatter) store, masked by $vm.
class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                        bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1034
// Binary operation, masked, without a policy operand: $merge is tied to $rd
// and supplies masked-off/tail element values.
class VPseudoBinaryMask<VReg RetClass,
                        RegisterClass Op1Class,
                        DAGOperand Op2Class,
                        string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1053
// Binary operation, masked, with a trailing $policy immediate selecting the
// tail/mask policy.
class VPseudoBinaryMaskTA<VReg RetClass,
                          RegisterClass Op1Class,
                          DAGOperand Op2Class,
                          string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1073
// Like VPseudoBinaryMask, but output can be V0.
// Used for mask-producing operations, where the plain RetClass (not
// GetVRegNoV0) allows the result to land in V0 itself.
class VPseudoBinaryMOutMask<VReg RetClass,
                            RegisterClass Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
                (ins RetClass:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1093
// Special version of VPseudoBinaryMask where we pretend the first source is
// tied to the destination so we can workaround the earlyclobber constraint.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryMask<VReg RetClass,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0; // Merge is also rs2.
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1115
// Binary operation with an optional carry/borrow input in V0 (the VMV0
// operand class), used for vadc/vmadc-style pseudos.  When CarryIn=0 the
// $carry operand is omitted.  No merge operand.
class VPseudoBinaryCarryIn<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           LMULInfo MInfo,
                           bit CarryIn,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
  // VLMul comes from the explicit LMULInfo parameter rather than an
  // enclosing "let VLMul = ..." scope.
  let VLMul = MInfo.value;
}
1138
// Like VPseudoBinaryCarryIn, but with a $merge operand tied to $rd to carry
// the tail-undisturbed value.
class VPseudoTiedBinaryCarryIn<VReg RetClass,
                               VReg Op1Class,
                               DAGOperand Op2Class,
                               LMULInfo MInfo,
                               bit CarryIn,
                               string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 0;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
  let VLMul = MInfo.value;
}
1162
// Ternary operation (e.g. multiply-add shapes), unmasked: the accumulator
// $rs3 is tied to $rd and doubles as the merge operand.
class VPseudoTernaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1182
// Ternary operation, unmasked, with a trailing $policy immediate; the
// accumulator $rs3 is tied to $rd and doubles as the merge operand.
class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
                                     string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1203
// Unit-stride segment load, unmasked.  NF is the number of fields per
// segment; isFF selects the fault-only-first variant.
class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1217
// Unit-stride segment load, masked, with $merge tied to $rd and a trailing
// $policy immediate.  isFF selects the fault-only-first variant.
class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1234
// Strided segment load, unmasked: base in $rs1, byte stride in $offset.
// NF is the number of fields per segment.
class VPseudoSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  // Note: a duplicated "let mayLoad = 1;" line was removed here.
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1249
// Strided segment load, masked, with $merge tied to $rd and a trailing
// $policy immediate.
class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1267
// Indexed segment load, unmasked: index vector in $offset; LMUL is the EMUL
// of the index operand.
class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                            bits<4> NF, bit Ordered>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1285
// Indexed segment load, masked, with $merge tied to $rd and a trailing
// $policy immediate.
class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bits<4> NF, bit Ordered>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1306
// Unit-stride segment store, unmasked.  NF is the number of fields per
// segment.
class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1320
// Unit-stride segment store, masked by $vm.
class VPseudoUSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1334
// Strided segment store, unmasked: base in $rs1, byte stride in $offset.
class VPseudoSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR: $offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1348
// Strided segment store, masked by $vm.
class VPseudoSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR: $offset,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1362
// Indexed segment store, unmasked: index vector in $index; LMUL is the EMUL
// of the index operand.
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                             bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
                  AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1378
// Indexed segment store, masked by $vm.
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                           bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1393
// Unit-stride loads: for every legal (EEW, LMUL) pair, define the unmasked
// tail-agnostic, unmasked tail-undisturbed (_TU) and masked (_MASK) pseudos.
multiclass VPseudoUSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo :
          VPseudoUSLoadNoMask<vreg, eew, false>,
          VLESched<eew>;
        def "E" # eew # "_V_" # LInfo # "_TU":
          VPseudoUSLoadNoMaskTU<vreg, eew, false>,
          VLESched<eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" :
          VPseudoUSLoadMask<vreg, eew, false>,
          VLESched<eew>;
      }
    }
  }
}
1413
// Unit-stride fault-only-first loads ("FF" in the name, isFF=true); same
// variant structure as VPseudoUSLoad.
multiclass VPseudoFFLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "FF_V_" # LInfo :
          VPseudoUSLoadNoMask<vreg, eew, true>,
          VLFSched<eew>;
        def "E" # eew # "FF_V_" # LInfo # "_TU":
          VPseudoUSLoadNoMaskTU<vreg, eew, true>,
          VLFSched<eew>;
        def "E" # eew # "FF_V_" # LInfo # "_MASK" :
          VPseudoUSLoadMask<vreg, eew, true>,
          VLFSched<eew>;
      }
    }
  }
}
1433
// Mask-register loads (vlm-style): one pseudo per mask type, always EEW=1
// and never fault-only-first.
multiclass VPseudoLoadMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>;
    }
  }
}
1441
// Strided loads: unmasked, unmasked tail-undisturbed (_TU) and masked
// (_MASK) pseudos for every legal (EEW, LMUL) pair.
multiclass VPseudoSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
                                        VLSSched<eew>;
        def "E" # eew # "_V_" # LInfo # "_TU": VPseudoSLoadNoMaskTU<vreg, eew>,
                                        VLSSched<eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>,
                                                  VLSSched<eew>;
      }
    }
  }
}
1458
// Indexed loads: iterate over all (index EEW, data SEW, LMUL) combinations
// and keep only those whose index EMUL (= eew * lmul / sew) is a legal LMUL
// (between 1/8 and 8, expressed in octuples as 1..64).  The destination may
// not overlap the index group when sew != eew, hence HasConstraint.
multiclass VPseudoILoad<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          defvar HasConstraint = !ne(sew, eew);
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
              VLXSched<eew, Order>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_TU":
              VPseudoILoadNoMaskTU<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
              VLXSched<eew, Order>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
              VLXSched<eew, Order>;
          }
        }
      }
    }
  }
}
1490
// Unit-stride stores: unmasked and masked pseudos for every legal
// (EEW, LMUL) pair.  Stores have no _TU variant (nothing to merge into).
multiclass VPseudoUSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
                                        VSESched<eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
                                                  VSESched<eew>;
      }
    }
  }
}
1505
// Mask-register stores (vsm-style): one pseudo per mask type, always EEW=1.
multiclass VPseudoStoreMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
    }
  }
}
1513
// Strided stores: unmasked and masked pseudos for every legal (EEW, LMUL)
// pair.
multiclass VPseudoSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
                                        VSSSched<eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
                                                  VSSSched<eew>;
      }
    }
  }
}
1528
// Indexed stores: same EMUL legality filtering as VPseudoILoad, but stores
// need no overlap constraint and no _TU variant.
multiclass VPseudoIStore<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
              VSXSched<eew, Order>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
              VSXSched<eew, Order>;
          }
        }
      }
    }
  }
}
1556
// Mask population count (vpopc/vcpop): mask source, scalar GPR result;
// unmasked and masked variants per mask type.
multiclass VPseudoVPOP_M {
  foreach mti = AllMasks in
  {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>,
                           Sched<[WriteVMPopV, ReadVMPopV, ReadVMPopV]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask,
                                     Sched<[WriteVMPopV, ReadVMPopV, ReadVMPopV]>;
    }
  }
}
1568
// Find-first-set mask bit (vfirst, per the VMFFS scheduling resources):
// mask source, scalar GPR result; unmasked and masked variants.
multiclass VPseudoV1ST_M {
  foreach mti = AllMasks in
  {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>,
                           Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMFFSV]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask,
                                     Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMFFSV]>;
    }
  }
}
1580
// Set-before/including/only-first mask ops (vmsbf/vmsif/vmsof, per the VMSFS
// scheduling resources): mask-to-mask, result may not overlap the source.
multiclass VPseudoVSFS_M {
  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in
  {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>,
                           Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
                                     Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
    }
  }
}
1593
// Vector element index (vid.v): nullary producer, one unmasked and one
// masked pseudo per LMUL.
multiclass VPseudoVID_V {
  foreach m = MxList in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>,
                         Sched<[WriteVMIdxV, ReadVMask]>;
      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>,
                                   Sched<[WriteVMIdxV, ReadVMask]>;
    }
  }
}
1604
// Per-mask-type instantiation of the VPseudoNullaryPseudoM class; BaseInst
// names the underlying mask-mask instruction (suffix "_MM" appended).
multiclass VPseudoNullaryPseudoM <string BaseInst> {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
    }
  }
}
1612
// Iota (viota.m, per the VMIot scheduling resources): mask source, vector
// result; destination may not overlap the source (@earlyclobber).
multiclass VPseudoVIOT_M {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
                       Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
                                 Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
    }
  }
}
1624
// Vector compress (vcompress.vm): the selector mask may be any vector
// register, so the AnyMask class is used; one pseudo per LMUL.
multiclass VPseudoVCPR_V {
  foreach m = MxList in {
    let VLMul = m.value in
      def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
                             Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>;
  }
}
1632
// Standard binary pseudo pair for one LMUL: unmasked plus masked-with-policy
// (_MASK uses the TA variant carrying a $policy operand).
multiclass VPseudoBinary<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
                         LMULInfo MInfo,
                         string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                             Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
                                                       Constraint>;
  }
}
1645
1646multiclass VPseudoBinaryM<VReg RetClass,
1647                          VReg Op1Class,
1648                          DAGOperand Op2Class,
1649                          LMULInfo MInfo,
1650                          string Constraint = ""> {
1651  let VLMul = MInfo.value in {
1652    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1653                                             Constraint>;
1654    let ForceTailAgnostic = true in
1655    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
1656                                                         Op2Class, Constraint>;
1657  }
1658}
1659
1660multiclass VPseudoBinaryEmul<VReg RetClass,
1661                             VReg Op1Class,
1662                             DAGOperand Op2Class,
1663                             LMULInfo lmul,
1664                             LMULInfo emul,
1665                             string Constraint = ""> {
1666  let VLMul = lmul.value in {
1667    def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1668                                                            Constraint>;
1669    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
1670                                                                      Constraint>;
1671  }
1672}
1673
1674multiclass VPseudoTiedBinary<VReg RetClass,
1675                             DAGOperand Op2Class,
1676                             LMULInfo MInfo,
1677                             string Constraint = ""> {
1678  let VLMul = MInfo.value in {
1679    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
1680                                                          Constraint>;
1681    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
1682                                                         Constraint>;
1683  }
1684}
1685
// Vector-vector binary pseudos: one VPseudoBinary pair per LMUL in MxList.
multiclass VPseudoBinaryV_VV<string Constraint = ""> {
  foreach mx = MxList in {
    defm "_VV" : VPseudoBinary<mx.vrclass, mx.vrclass, mx.vrclass, mx,
                               Constraint>;
  }
}
1690
// Similar to VPseudoBinaryV_VV, but uses MxListF.
// (MxListF presumably restricts LMULs to those valid for FP element widths —
// see its definition.)
multiclass VPseudoBinaryFV_VV<string Constraint = ""> {
  foreach m = MxListF in
    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}

// vrgather pseudos with a fixed index EEW (eew): the index operand's EMUL is
// rescaled from the data LMUL. Combinations whose EMUL falls outside
// [1/8, 8] (octuple 1..64) cannot be encoded and are skipped.
multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
  foreach m = MxList in {
    foreach sew = EEWList in {
      defvar octuple_lmul = m.octuple;
      // emul = lmul * eew / sew
      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), log2<sew>.val);
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>,
                   Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV]>;
      }
    }
  }
}
1712
// Vector-scalar (GPR) binary pseudos: one VPseudoBinary pair per LMUL.
multiclass VPseudoBinaryV_VX<string Constraint = ""> {
  foreach mx = MxList in {
    defm _VX : VPseudoBinary<mx.vrclass, mx.vrclass, GPR, mx, Constraint>;
  }
}
1717
// vslide1up/vslide1down.vx pseudos: slide by one with a GPR fill value.
multiclass VPseudoVSLD1_VX<string Constraint = ""> {
  foreach m = MxList in
    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
                 Sched<[WriteVISlide1X, ReadVISlideV, ReadVISlideX, ReadVMask]>;
}

// Vector-scalar FP binary pseudos. FPList pairs each scalar FP register
// class (f.fprclass) with the LMULs valid for it (f.MxList).
multiclass VPseudoBinaryV_VF<string Constraint = ""> {
  foreach f = FPList in
    foreach m = f.MxList in
      defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
                                       f.fprclass, m, Constraint>;
}

// vfslide1up/vfslide1down.vf pseudos: slide by one with an FP fill value.
multiclass VPseudoVSLD1_VF<string Constraint = ""> {
  foreach f = FPList in
    foreach m = f.MxList in
      defm "_V" # f.FX :
        VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
        Sched<[WriteVFSlide1F, ReadVFSlideV, ReadVFSlideF, ReadVMask]>;
}
1738
// Vector-immediate binary pseudos; the immediate operand type defaults to
// simm5 but may be overridden (e.g. for unsigned immediates).
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach mx = MxList in {
    defm "_VI" : VPseudoBinary<mx.vrclass, mx.vrclass, ImmType, mx, Constraint>;
  }
}
1743
// Mask-register logical pseudos ("_MM" forms, e.g. vmand.mm). The operands
// are whole mask registers (VR), but one def per LMUL is still emitted, each
// stamped with its VLMul.
multiclass VPseudoVALU_MM {
  foreach m = MxList in
    let VLMul = m.value in {
      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">,
                          Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
    }
}
1751
// We use @earlyclobber here because the spec only permits these overlaps:
// * If the destination EEW is smaller than the source EEW, overlap is legal
//   only in the lowest-numbered part of the source register group; any other
//   overlap is illegal.
// * If the destination EEW is greater than the source EEW, overlap is legal
//   only when the source EMUL is at least 1 and the overlap is in the
//   highest-numbered part of the destination register group; any other
//   overlap is illegal.
// Widening op, vector-vector: destination uses the doubled-width register
// class (wvrclass), so the overlap rules above force @earlyclobber.
multiclass VPseudoBinaryW_VV<list<LMULInfo> mxlist = MxListW> {
  foreach m = mxlist in
    defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

// Widening op, vector-scalar (GPR); same overlap constraint as W_VV.
multiclass VPseudoBinaryW_VX {
  foreach m = MxListW in
    defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
                               "@earlyclobber $rd">;
}

// Widening op, vector-FP-scalar; same overlap constraint as W_VV.
multiclass VPseudoBinaryW_VF {
  foreach f = FPListW in
    foreach m = f.MxList in
      defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
                                       f.fprclass, m,
                                       "@earlyclobber $rd">;
}

// Widening op whose first source is already wide ("_WV"). Both a regular
// pair and a "_TIED" pair (destination tied to the wide source) are emitted.
multiclass VPseudoBinaryW_WV<list<LMULInfo> mxlist = MxListW> {
  foreach m = mxlist in {
    defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
    defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
                                 "@earlyclobber $rd">;
  }
}

// "_WX": wide first source, GPR second source. Destination EEW matches the
// wide source, so no overlap constraint is needed.
multiclass VPseudoBinaryW_WX {
  foreach m = MxListW in
    defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
}

// "_WF": wide first source, FP scalar second source; no constraint needed.
multiclass VPseudoBinaryW_WF {
  foreach f = FPListW in
    foreach m = f.MxList in
      defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
                                       f.fprclass, m>;
}
1799
// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
// if the source and destination have an LMUL<=1. This matches this overlap
// exception from the spec.
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group."
// (m.octuple >= 8 means dest LMUL >= 1, i.e. source LMUL > 1, so the
// exception does not apply and @earlyclobber is required.)
multiclass VPseudoBinaryV_WV {
  foreach m = MxListW in
    defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}

// Narrowing op, wide-source vector-scalar (GPR) form.
multiclass VPseudoBinaryV_WX {
  foreach m = MxListW in
    defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}

// Narrowing op, wide-source vector-immediate (uimm5) form.
multiclass VPseudoBinaryV_WI {
  foreach m = MxListW in
    defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
1822
// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
// The !if network picks the destination class: a mask register (VR) when the
// result is the carry-out, and a class excluding v0 when a carry-in is
// consumed without a carry-out (v0 carries the mask, so vd == v0 is reserved).
multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList in
    def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}

// Tied ("_TU") variant of VPseudoBinaryV_VM: destination tied to a merge
// operand so codegen can control the tail elements.
multiclass VPseudoTiedBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
                                 string Constraint = ""> {
  foreach m = MxList in
    def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU" :
      VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
                               !if(!and(CarryIn, !not(CarryOut)),
                                   GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                               m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}

// Vector-scalar (GPR) carry in/out pseudos; destination class selection as
// in VPseudoBinaryV_VM above.
multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList in
    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, GPR, m, CarryIn, Constraint>;
}

// Tied ("_TU") variant of VPseudoBinaryV_XM.
multiclass VPseudoTiedBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
                                 string Constraint = ""> {
  foreach m = MxList in
    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
      VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
                               !if(!and(CarryIn, !not(CarryOut)),
                                   GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                               m.vrclass, GPR, m, CarryIn, Constraint>;
}

// vfmerge.vfm pseudos: FP-scalar merge under the v0 mask, plus tied "_TU"
// versions. The destination class excludes v0 (it holds the mask).
multiclass VPseudoVMRG_FM {
  foreach f = FPList in
    foreach m = f.MxList in {
      def "_V" # f.FX # "M_" # m.MX :
        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
        Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;
      // Tied version to allow codegen control over the tail elements
      def "_V" # f.FX # "M_" # m.MX # "_TU":
        VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                                 m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
        Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;
    }
}

// Vector-immediate (simm5) carry in/out pseudos.
multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList in
    def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, simm5, m, CarryIn, Constraint>;
}

// Tied ("_TU") variant of VPseudoBinaryV_IM.
multiclass VPseudoTiedBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
                                 string Constraint = ""> {
  foreach m = MxList in
    def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
      VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
                               !if(!and(CarryIn, !not(CarryOut)),
                                   GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                               m.vrclass, simm5, m, CarryIn, Constraint>;
}
1900
// vmv.v.v / vmv.v.x / vmv.v.i pseudos. These use the "NoDummyMask" unary
// form: the instructions are inherently unmasked, so no mask operand exists.
multiclass VPseudoUnaryVMV_V_X_I {
  foreach m = MxList in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>,
                         Sched<[WriteVIMovV, ReadVIMovV]>;
      def "_X_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, GPR>,
                         Sched<[WriteVIMovX, ReadVIMovX]>;
      def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>,
                         Sched<[WriteVIMovI]>;
    }
  }
}

// vfmv.v.f pseudos: one per (scalar FP class, LMUL) pair; also unmasked.
multiclass VPseudoVMV_F {
  foreach f = FPList in {
    foreach m = f.MxList in {
      let VLMul = m.value in {
        def "_" # f.FX # "_" # m.MX :
          VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>,
          Sched<[WriteVFMovV, ReadVFMovF]>;
      }
    }
  }
}

// vfclass.v pseudos (unmasked + "_MASK" pair per FP LMUL).
multiclass VPseudoVCLS_V {
  foreach m = MxListF in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                         Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                                   Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
    }
  }
}

// vfsqrt.v pseudos; the masked form is the tail-agnostic-capable (TA) one.
multiclass VPseudoVSQR_V {
  foreach m = MxListF in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                         Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
                                   Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
    }
  }
}

// FP unary pseudos on reciprocal-estimate resources (likely vfrec7.v /
// vfrsqrt7.v — confirm at the instantiation sites).
multiclass VPseudoVRCP_V {
  foreach m = MxListF in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                         Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
                                   Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
    }
  }
}
1958
// vzext/vsext .vf2 pseudos: the source class (f2vrclass) holds elements at
// half the destination EEW. Destination EEW > source EEW, so @earlyclobber
// is required per the overlap rules.
multiclass PseudoVEXT_VF2 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF2 in
  {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>,
                       Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
      def "_" # m.MX # "_MASK" :
        VPseudoUnaryMaskTA<m.vrclass, m.f2vrclass, constraints>,
        Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
    }
  }
}

// .vf4 variant: source elements are a quarter of the destination EEW.
multiclass PseudoVEXT_VF4 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF4 in
  {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>,
                       Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
      def "_" # m.MX # "_MASK" :
        VPseudoUnaryMaskTA<m.vrclass, m.f4vrclass, constraints>,
        Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
    }
  }
}

// .vf8 variant: source elements are an eighth of the destination EEW.
multiclass PseudoVEXT_VF8 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF8 in
  {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>,
                       Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
      def "_" # m.MX # "_MASK" :
        VPseudoUnaryMaskTA<m.vrclass, m.f8vrclass, constraints>,
        Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
    }
  }
}
2000
// The destination EEW is 1 since "For the purposes of register group overlap
// constraints, mask elements have EEW=1."
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW is different from source EEW, we need to use
// @earlyclobber to avoid the overlap between destination and source registers.
// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
// exception from the spec
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group".
// With LMUL<=1 the source and dest occupy a single register so any overlap
// is in the lowest-numbered part.
// (m.octuple >= 16 means source LMUL > 1, where the exception cannot apply.)
multiclass VPseudoBinaryM_VV<list<LMULInfo> mxlist = MxList> {
  foreach m = mxlist in
    defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

// Mask-writing compare, vector-scalar (GPR) form.
multiclass VPseudoBinaryM_VX {
  foreach m = MxList in
    defm "_VX" :
      VPseudoBinaryM<VR, m.vrclass, GPR, m,
                     !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

// Mask-writing compare, vector-FP-scalar form.
multiclass VPseudoBinaryM_VF {
  foreach f = FPList in
    foreach m = f.MxList in
      defm "_V" # f.FX :
        VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
                       !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

// Mask-writing compare, vector-immediate (simm5) form.
multiclass VPseudoBinaryM_VI {
  foreach m = MxList in
    defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
2038
// vrgather pseudos in .vv/.vx/.vi forms with gather scheduling resources.
multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoBinaryV_VV<Constraint>,
            Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX<Constraint>,
            Sched<[WriteVGatherX, ReadVGatherV, ReadVGatherX, ReadVMask]>;
  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
            Sched<[WriteVGatherI, ReadVGatherV, ReadVMask]>;
}

// Saturating integer add/sub pseudos in .vv/.vx/.vi forms.
multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoBinaryV_VV<Constraint>,
            Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX<Constraint>,
            Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
            Sched<[WriteVSALUI, ReadVSALUV, ReadVMask]>;
}


// Integer shift pseudos in .vv/.vx/.vi forms.
multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoBinaryV_VV<Constraint>,
            Sched<[WriteVShiftV, ReadVShiftV, ReadVShiftV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX<Constraint>,
            Sched<[WriteVShiftX, ReadVShiftV, ReadVShiftX, ReadVMask]>;
  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
            Sched<[WriteVShiftI, ReadVShiftV, ReadVMask]>;
}

// Scaling (rounding) shift pseudos in .vv/.vx/.vi forms.
multiclass VPseudoVSSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoBinaryV_VV<Constraint>,
            Sched<[WriteVSShiftV, ReadVSShiftV, ReadVSShiftV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX<Constraint>,
            Sched<[WriteVSShiftX, ReadVSShiftV, ReadVSShiftX, ReadVMask]>;
  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
            Sched<[WriteVSShiftI, ReadVSShiftV, ReadVMask]>;
}

// Integer ALU pseudos in .vv/.vx/.vi forms.
multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoBinaryV_VV<Constraint>,
            Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX<Constraint>,
            Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>,
            Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}
2084
// Saturating integer ALU pseudos, .vv/.vx only (no immediate form).
multiclass VPseudoVSALU_VV_VX {
  defm "" : VPseudoBinaryV_VV,
            Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
}

// vsmul pseudos, .vv/.vx.
multiclass VPseudoVSMUL_VV_VX {
  defm "" : VPseudoBinaryV_VV,
            Sched<[WriteVSMulV, ReadVSMulV, ReadVSMulV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVSMulX, ReadVSMulV, ReadVSMulX, ReadVMask]>;
}

// Averaging add/sub pseudos (vaadd/vasub family), .vv/.vx.
multiclass VPseudoVAALU_VV_VX {
  defm "" : VPseudoBinaryV_VV,
            Sched<[WriteVAALUV, ReadVAALUV, ReadVAALUV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVAALUX, ReadVAALUV, ReadVAALUX, ReadVMask]>;
}

// Integer min/max pseudos, .vv/.vx (uses the integer compare resources).
multiclass VPseudoVMINMAX_VV_VX {
  defm "" : VPseudoBinaryV_VV,
            Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
}

// Integer multiply pseudos, .vv/.vx.
multiclass VPseudoVMUL_VV_VX {
  defm "" : VPseudoBinaryV_VV,
            Sched<[WriteVIMulV, ReadVIMulV, ReadVIMulV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVIMulX, ReadVIMulV, ReadVIMulX, ReadVMask]>;
}

// Integer divide/remainder pseudos, .vv/.vx.
multiclass VPseudoVDIV_VV_VX {
  defm "" : VPseudoBinaryV_VV,
            Sched<[WriteVIDivV, ReadVIDivV, ReadVIDivV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVIDivX, ReadVIDivV, ReadVIDivX, ReadVMask]>;
}

// FP multiply pseudos, .vv/.vf.
multiclass VPseudoVFMUL_VV_VF {
  defm "" : VPseudoBinaryFV_VV,
            Sched<[WriteVFMulV, ReadVFMulV, ReadVFMulV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VF,
            Sched<[WriteVFMulF, ReadVFMulV, ReadVFMulF, ReadVMask]>;
}

// FP divide pseudos, .vv/.vf.
multiclass VPseudoVFDIV_VV_VF {
  defm "" : VPseudoBinaryFV_VV,
            Sched<[WriteVFDivV, ReadVFDivV, ReadVFDivV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VF,
            Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}

// Reverse FP divide (vfrdiv.vf) pseudos — scalar form only.
multiclass VPseudoVFRDIV_VF {
  defm "" : VPseudoBinaryV_VF,
            Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}
2145
// Integer ALU pseudos, .vv/.vx only.
multiclass VPseudoVALU_VV_VX {
  defm "" : VPseudoBinaryV_VV,
            Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
}

// FP sign-injection pseudos (vfsgnj family), .vv/.vf.
multiclass VPseudoVSGNJ_VV_VF {
  defm "" : VPseudoBinaryFV_VV,
            Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VF,
            Sched<[WriteVFSgnjF, ReadVFSgnjV, ReadVFSgnjF, ReadVMask]>;
}

// FP min/max pseudos, .vv/.vf (uses the FP compare resources).
multiclass VPseudoVMAX_VV_VF {
  defm "" : VPseudoBinaryFV_VV,
            Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VF,
            Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

// FP add/sub pseudos, .vv/.vf.
multiclass VPseudoVALU_VV_VF {
  defm "" : VPseudoBinaryFV_VV,
            Sched<[WriteVFALUV, ReadVFALUV, ReadVFALUV, ReadVMask]>;
  defm "" : VPseudoBinaryV_VF,
            Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

// FP ALU pseudos with only a scalar form (e.g. reverse-operand ops).
multiclass VPseudoVALU_VF {
  defm "" : VPseudoBinaryV_VF,
            Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

// Integer ALU pseudos with only .vx/.vi forms (e.g. reverse-operand ops).
multiclass VPseudoVALU_VX_VI<Operand ImmType = simm5> {
  defm "" : VPseudoBinaryV_VX,
            Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  defm "" : VPseudoBinaryV_VI<ImmType>,
            Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}
2185
// Widening integer ALU pseudos, .vv/.vx.
multiclass VPseudoVWALU_VV_VX {
  defm "" : VPseudoBinaryW_VV,
            Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
  defm "" : VPseudoBinaryW_VX,
            Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>;
}

// Widening integer multiply pseudos, .vv/.vx.
multiclass VPseudoVWMUL_VV_VX {
  defm "" : VPseudoBinaryW_VV,
            Sched<[WriteVIWMulV, ReadVIWMulV, ReadVIWMulV, ReadVMask]>;
  defm "" : VPseudoBinaryW_VX,
            Sched<[WriteVIWMulX, ReadVIWMulV, ReadVIWMulX, ReadVMask]>;
}

// Widening FP multiply pseudos, .vv/.vf (FP widening LMUL list).
multiclass VPseudoVWMUL_VV_VF {
  defm "" : VPseudoBinaryW_VV<MxListFW>,
            Sched<[WriteVFWMulV, ReadVFWMulV, ReadVFWMulV, ReadVMask]>;
  defm "" : VPseudoBinaryW_VF,
            Sched<[WriteVFWMulF, ReadVFWMulV, ReadVFWMulF, ReadVMask]>;
}

// Widening integer ALU pseudos with a wide first operand, .wv/.wx.
multiclass VPseudoVWALU_WV_WX {
  defm "" : VPseudoBinaryW_WV,
            Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
  defm "" : VPseudoBinaryW_WX,
            Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>;
}

// Widening FP add/sub pseudos, .vv/.vf.
multiclass VPseudoVFWALU_VV_VF {
  defm "" : VPseudoBinaryW_VV<MxListFW>,
            Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
  defm "" : VPseudoBinaryW_VF,
            Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
}

// Widening FP add/sub pseudos with a wide first operand, .wv/.wf.
multiclass VPseudoVFWALU_WV_WF {
  defm "" : VPseudoBinaryW_WV<MxListFW>,
            Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
  defm "" : VPseudoBinaryW_WF,
            Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
}
2227
// vmerge pseudos in .vvm/.vxm/.vim forms, plus tied "_TU" versions.
multiclass VPseudoVMRG_VM_XM_IM {
  defm "" : VPseudoBinaryV_VM,
            Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
  defm "" : VPseudoBinaryV_XM,
            Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
  defm "" : VPseudoBinaryV_IM,
            Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
  // Tied versions to allow codegen control over the tail elements
  defm "" : VPseudoTiedBinaryV_VM,
            Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
  defm "" : VPseudoTiedBinaryV_XM,
            Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
  defm "" : VPseudoTiedBinaryV_IM,
            Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
}

// Carry-in ALU pseudos (e.g. vadc) in .vvm/.vxm/.vim forms.
multiclass VPseudoVCALU_VM_XM_IM {
  defm "" : VPseudoBinaryV_VM,
            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  defm "" : VPseudoBinaryV_XM,
            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
  defm "" : VPseudoBinaryV_IM,
            Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
}

// Carry-in ALU pseudos for operations without an immediate form (e.g. vsbc).
multiclass VPseudoVCALU_VM_XM {
  defm "" : VPseudoBinaryV_VM,
            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  defm "" : VPseudoBinaryV_XM,
            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
}

// Mask-result carry ops with carry-in (e.g. vmadc.vvm): CarryOut=1.
multiclass VPseudoVCALUM_VM_XM_IM<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
            Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
}

// As above but without an immediate form.
multiclass VPseudoVCALUM_VM_XM<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
}

// Mask-result carry ops with no carry-in (CarryIn=0); note there is no
// ReadVMask in the scheduling lists since no mask/carry is consumed.
multiclass VPseudoVCALUM_V_X_I<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
            Sched<[WriteVICALUI, ReadVIALUCV]>;
}

// As above but without an immediate form.
multiclass VPseudoVCALUM_V_X<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
            Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
            Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
}

// Narrowing fixed-point clip pseudos (vnclip/vnclipu), .wv/.wx/.wi.
multiclass VPseudoVNCLP_WV_WX_WI {
  defm "" : VPseudoBinaryV_WV,
            Sched<[WriteVNClipV, ReadVNClipV, ReadVNClipV, ReadVMask]>;
  defm "" : VPseudoBinaryV_WX,
            Sched<[WriteVNClipX, ReadVNClipV, ReadVNClipX, ReadVMask]>;
  defm "" : VPseudoBinaryV_WI,
            Sched<[WriteVNClipI, ReadVNClipV, ReadVMask]>;
}

// Narrowing shift pseudos (vnsrl/vnsra), .wv/.wx/.wi.
multiclass VPseudoVNSHT_WV_WX_WI {
  defm "" : VPseudoBinaryV_WV,
            Sched<[WriteVNShiftV, ReadVNShiftV, ReadVNShiftV, ReadVMask]>;
  defm "" : VPseudoBinaryV_WX,
            Sched<[WriteVNShiftX, ReadVNShiftV, ReadVNShiftX, ReadVMask]>;
  defm "" : VPseudoBinaryV_WI,
            Sched<[WriteVNShiftI, ReadVNShiftV, ReadVMask]>;
}
2308}
2309
2310multiclass VPseudoTernary<VReg RetClass,
2311                          RegisterClass Op1Class,
2312                          DAGOperand Op2Class,
2313                          LMULInfo MInfo,
2314                          string Constraint = ""> {
2315  let VLMul = MInfo.value in {
2316    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
2317    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
2318  }
2319}
2320
2321multiclass VPseudoTernaryWithPolicy<VReg RetClass,
2322                                    RegisterClass Op1Class,
2323                                    DAGOperand Op2Class,
2324                                    LMULInfo MInfo,
2325                                    string Constraint = "",
2326                                    bit Commutable = 0> {
2327  let VLMul = MInfo.value in {
2328    let isCommutable = Commutable in
2329    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
2330    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
2331  }
2332}
2333
// Multiply-add style ternary, vector-vector form; flagged commutable.
multiclass VPseudoTernaryV_VV_AAXA<string Constraint = "",
                                   list<LMULInfo> mxlist = MxList> {
  foreach m = mxlist in {
    defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
                                        Constraint, /*Commutable*/1>;
  }
}

// Plain ternary vector-scalar form (used for slides, which are not
// commutable and carry no policy operand).
multiclass VPseudoTernaryV_VX<string Constraint = ""> {
  foreach m = MxList in
    defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

// Multiply-add style ternary with a GPR scalar first operand; commutable.
multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
  foreach m = MxList in
    defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
                                          Constraint, /*Commutable*/1>;
}

// Multiply-add style ternary with an FP scalar first operand; commutable.
multiclass VPseudoTernaryV_VF_AAXA<string Constraint = ""> {
  foreach f = FPList in
    foreach m = f.MxList in
      defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
                                                  m.vrclass, m, Constraint,
                                                  /*Commutable*/1>;
}

// Widening multiply-add, vector-vector: wide destination, so @earlyclobber.
multiclass VPseudoTernaryW_VV<list<LMULInfo> mxlist = MxListW> {
  defvar constraint = "@earlyclobber $rd";
  foreach m = mxlist in
    defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
                                        constraint>;
}

// Widening multiply-add with a GPR scalar operand.
multiclass VPseudoTernaryW_VX {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in
    defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
                                          constraint>;
}

// Widening multiply-add with an FP scalar operand.
multiclass VPseudoTernaryW_VF {
  defvar constraint = "@earlyclobber $rd";
  foreach f = FPListW in
    foreach m = f.MxList in
      defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
                                                  m.vrclass, m, constraint>;
}

// Plain ternary vector-immediate form (used for slides).
multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in
    defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
2387
// Integer multiply-add pseudos: commutable VV and VX forms, annotated with
// the integer multiply-add scheduling resources.
multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
  defm "" : VPseudoTernaryV_VV_AAXA<Constraint>,
            Sched<[WriteVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVMask]>;
  defm "" : VPseudoTernaryV_VX_AAXA<Constraint>,
            Sched<[WriteVIMulAddX, ReadVIMulAddV, ReadVIMulAddV, ReadVIMulAddX, ReadVMask]>;
}

// FP multiply-add pseudos: commutable VV (restricted to the FP-capable LMULs
// via MxListF) and VF forms, with FP multiply-add scheduling resources.
multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
  defm "" : VPseudoTernaryV_VV_AAXA<Constraint, MxListF>,
            Sched<[WriteVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVMask]>;
  defm "" : VPseudoTernaryV_VF_AAXA<Constraint>,
            Sched<[WriteVFMulAddF, ReadVFMulAddV, ReadVFMulAddV, ReadVFMulAddF, ReadVMask]>;
}

// Slide pseudos: scalar (VX) and immediate (VI) forms with slide scheduling
// resources.  The VI form lists one fewer read resource because the
// immediate operand is not a register read.
multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoTernaryV_VX<Constraint>,
            Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideV, ReadVISlideX, ReadVMask]>;
  defm "" : VPseudoTernaryV_VI<ImmType, Constraint>,
            Sched<[WriteVISlideI, ReadVISlideV, ReadVISlideV, ReadVMask]>;
}

// Widening integer multiply-add pseudos, VV and VX forms.
multiclass VPseudoVWMAC_VV_VX {
  defm "" : VPseudoTernaryW_VV,
            Sched<[WriteVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVMask]>;
  defm "" : VPseudoTernaryW_VX,
            Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}

// Widening integer multiply-add pseudos, VX form only.
multiclass VPseudoVWMAC_VX {
  defm "" : VPseudoTernaryW_VX,
            Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}

// Widening FP multiply-add pseudos: VV (FP-widenable LMULs via MxListFW)
// and VF forms.
multiclass VPseudoVWMAC_VV_VF {
  defm "" : VPseudoTernaryW_VV<MxListFW>,
            Sched<[WriteVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVMask]>;
  defm "" : VPseudoTernaryW_VF,
            Sched<[WriteVFWMulAddF, ReadVFWMulAddV, ReadVFWMulAddF, ReadVMask]>;
}
2427
// Mask-producing integer compare pseudos: vector-vector, vector-scalar and
// vector-immediate forms (VPseudoBinaryM_* produce a mask-register result).
multiclass VPseudoVCMPM_VV_VX_VI {
  defm "" : VPseudoBinaryM_VV,
            Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
  defm "" : VPseudoBinaryM_VX,
            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
  defm "" : VPseudoBinaryM_VI,
            Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
}

// Mask-producing integer compare pseudos: VV and VX forms only.
multiclass VPseudoVCMPM_VV_VX {
  defm "" : VPseudoBinaryM_VV,
            Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
  defm "" : VPseudoBinaryM_VX,
            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
}

// Mask-producing FP compare pseudos: VV (FP LMULs via MxListF) and VF forms.
multiclass VPseudoVCMPM_VV_VF {
  defm "" : VPseudoBinaryM_VV<MxListF>,
            Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
  defm "" : VPseudoBinaryM_VF,
            Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

// Mask-producing FP compare pseudos: VF form only.
multiclass VPseudoVCMPM_VF {
  defm "" : VPseudoBinaryM_VF,
            Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

// Mask-producing integer compare pseudos: VX and VI forms only.
multiclass VPseudoVCMPM_VX_VI {
  defm "" : VPseudoBinaryM_VX,
            Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
  defm "" : VPseudoBinaryM_VI,
            Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
}
2462
// Reduction pseudos (vector-scalar form).  Regardless of the source LMUL,
// the result and the scalar source operand use the single-register V_M1
// class, so only element 0 of a single register is produced/consumed.

// Integer reductions over every LMUL.
multiclass VPseudoVRED_VS {
  foreach m = MxList in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV, ReadVIRedV, ReadVMask]>;
  }
}

// Widening integer reductions over every LMUL.
multiclass VPseudoVWRED_VS {
  foreach m = MxList in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVMask]>;
  }
}

// Unordered FP reductions over the FP-capable LMULs.
multiclass VPseudoVFRED_VS {
  foreach m = MxListF in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV, ReadVFRedV, ReadVMask]>;
  }
}

// Ordered FP reductions over the FP-capable LMULs.
multiclass VPseudoVFREDO_VS {
  foreach m = MxListF in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVMask]>;
  }
}

// Widening FP reductions over the FP-capable LMULs.
multiclass VPseudoVFWRED_VS {
  foreach m = MxListF in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVMask]>;
  }
}
2497
// Defines one unmasked and one masked (tail-agnostic-capable) unary
// conversion pseudo for a single LMUL; the names are suffixed with the LMUL
// string and, for the masked form, "_MASK".
multiclass VPseudoConversion<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA<RetClass, Op1Class,
                                                      Constraint>;
  }
}

// Same-width FP-to-integer conversion pseudos over the FP-capable LMULs.
multiclass VPseudoVCVTI_V {
  foreach m = MxListF in
    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtFToIV, ReadVFCvtFToIV, ReadVMask]>;
}

// Same-width integer-to-FP conversion pseudos over the FP-capable LMULs.
multiclass VPseudoVCVTF_V {
  foreach m = MxListF in
    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtIToFV, ReadVFCvtIToFV, ReadVMask]>;
}

// Widening conversions: the destination uses the widened register class and
// is earlyclobber so it cannot be allocated overlapping the narrow source.
multiclass VPseudoConversionW_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>;
}

// Widening FP-to-integer conversion pseudos (FP-widenable LMULs).
multiclass VPseudoVWCVTI_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtFToIV, ReadVFWCvtFToIV, ReadVMask]>;
}

// Widening integer-to-FP conversion pseudos (widenable LMULs).
multiclass VPseudoVWCVTF_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtIToFV, ReadVFWCvtIToFV, ReadVMask]>;
}

// Widening FP-to-FP conversion pseudos (FP-widenable LMULs).
multiclass VPseudoVWCVTD_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtFToFV, ReadVFWCvtFToFV, ReadVMask]>;
}

// Narrowing FP-to-integer conversion pseudos: the source is the wide class,
// the destination the narrow one (note the swapped argument order relative
// to the widening variants above); destination is still earlyclobber.
multiclass VPseudoVNCVTI_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in
    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtFToIV, ReadVFNCvtFToIV, ReadVMask]>;
}

// Narrowing integer-to-FP conversion pseudos (FP-widenable LMULs).
multiclass VPseudoVNCVTF_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in
    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtIToFV, ReadVFNCvtIToFV, ReadVMask]>;
}

// Narrowing FP-to-FP conversion pseudos (FP-widenable LMULs).
multiclass VPseudoVNCVTD_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in
    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
}
2568
// Unit-stride segment load pseudos.  For each element width, each LMUL valid
// for that width, and each legal segment count nf (NFSet), defines an
// unmasked and a masked pseudo over the matching register-tuple class.
// When isFF is set the names gain an "FF" infix (fault-only-first variants).
multiclass VPseudoUSSegLoad<bit isFF> {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          // Register class holding an nf-register segment tuple.
          defvar vreg = SegRegClass<lmul, nf>.RC;
          defvar FFStr = !if(isFF, "FF", "");
          def nf # "E" # eew # FFStr # "_V_" # LInfo :
            VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>;
          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadMask<vreg, eew, nf, isFF>;
        }
      }
    }
  }
}

// Strided segment load pseudos; same naming/iteration scheme as above but
// without a fault-only-first variant.
multiclass VPseudoSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>;
        }
      }
    }
  }
}
2601
// Indexed segment load pseudos (Ordered selects ordered vs. unordered).
// Iterates index EEW x data SEW x data LMUL, computes the index-vector EMUL,
// and defines unmasked/masked pseudos for each legal segment count nf.
multiclass VPseudoISegLoad<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        // Only legal EMULs: octuple values 1..64 correspond to MF8..M8.
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              // Register class holding an nf-register segment tuple.
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                      nf, Ordered>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                    nf, Ordered>;
            }
          }
        }
      }
    }
  }
}
2631
// Unit-stride segment store pseudos: unmasked and masked variants for each
// element width, valid LMUL, and legal segment count nf.
multiclass VPseudoUSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          // Register class holding an nf-register segment tuple.
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>;
        }
      }
    }
  }
}

// Strided segment store pseudos; same iteration/naming scheme as above.
multiclass VPseudoSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>;
        }
      }
    }
  }
}
2661
// Indexed segment store pseudos (Ordered selects ordered vs. unordered).
// Mirrors VPseudoISegLoad: iterates index EEW x data SEW x data LMUL,
// computes the index-vector EMUL, and defines unmasked/masked pseudos for
// each legal segment count nf.
multiclass VPseudoISegStore<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        // Only legal EMULs: octuple values 1..64 correspond to MF8..M8.
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              // Register class holding an nf-register segment tuple.
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                       nf, Ordered>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                     nf, Ordered>;
            }
          }
        }
      }
    }
  }
}
2691
2692//===----------------------------------------------------------------------===//
2693// Helpers to define the intrinsic patterns.
2694//===----------------------------------------------------------------------===//
2695
// Selects an unmasked unary intrinsic to the matching unmasked pseudo
// (inst_kind_LMUL), forwarding the source operand, VL and SEW.
class VPatUnaryNoMask<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew)>;

// Selects the "_mask" intrinsic variant to the "_MASK" pseudo, forwarding
// the merge (passthru) operand and the mask in V0 as well.
class VPatUnaryMask<string intrinsic_name,
                    string inst,
                    string kind,
                    ValueType result_type,
                    ValueType op2_type,
                    ValueType mask_type,
                    int sew,
                    LMULInfo vlmul,
                    VReg result_reg_class,
                    VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

// Like VPatUnaryMask, but the intrinsic and pseudo additionally carry an
// XLenVT tail-policy immediate, which is forwarded through.
class VPatUnaryMaskTA<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
2750
// Selects an unmasked mask-register unary intrinsic to the "_M_<BX>" pseudo
// (mask operations use the BX suffix from MTypeInfo instead of an LMUL).
class VPatMaskUnaryNoMask<string intrinsic_name,
                          string inst,
                          MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                (mti.Mask VR:$rs2),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX)
                (mti.Mask VR:$rs2),
                GPR:$vl, mti.Log2SEW)>;

// Masked variant: forwards the merge operand and the mask in V0 to the
// "_M_<BX>_MASK" pseudo.
class VPatMaskUnaryMask<string intrinsic_name,
                        string inst,
                        MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

// Unary pattern whose mask operand is an arbitrary VR register (not V0),
// e.g. for operations that consume a mask as a regular source.
class VPatUnaryAnyMask<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   GPR:$vl, sew)>;
2794
// Selects an unmasked binary intrinsic to the named pseudo, forwarding both
// sources, VL and SEW.  Note: unlike the unary patterns, 'inst' here is the
// full pseudo name (no kind/LMUL suffix is appended).
class VPatBinaryNoMask<string intrinsic_name,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       int sew,
                       VReg op1_reg_class,
                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

// Same as above but source operands are swapped.
// (The intrinsic lists rs2 first; the emitted pseudo still takes rs1, rs2.)
class VPatBinaryNoMaskSwapped<string intrinsic_name,
                              string inst,
                              ValueType result_type,
                              ValueType op1_type,
                              ValueType op2_type,
                              int sew,
                              VReg op1_reg_class,
                              DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

// Masked binary pattern: forwards merge, both sources and the V0 mask to
// the "_MASK" pseudo.
class VPatBinaryMask<string intrinsic_name,
                     string inst,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int sew,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

// Like VPatBinaryMask, but also forwards the XLenVT tail-policy immediate.
class VPatBinaryMaskTA<string intrinsic_name,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       VReg result_reg_class,
                       VReg op1_reg_class,
                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
2873
// Same as above but source operands are swapped.
// (Masked counterpart of VPatBinaryNoMaskSwapped: the intrinsic lists rs2
// before rs1, while the pseudo receives them in rs1, rs2 order.)
class VPatBinaryMaskSwapped<string intrinsic_name,
                            string inst,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            VReg result_reg_class,
                            VReg op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

// Unmasked binary pattern where the first source is tied to the result
// register class; selects to the "_TIED" pseudo variant.
class VPatTiedBinaryNoMask<string intrinsic_name,
                           string inst,
                           ValueType result_type,
                           ValueType op2_type,
                           int sew,
                           VReg result_reg_class,
                           DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

// Masked tied pattern: only matches when the merge operand and the first
// source are the same register ($merge appears twice in the source DAG);
// the "_MASK_TIED" pseudo then receives $merge only once.
class VPatTiedBinaryMask<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int sew,
                         VReg result_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
2931
// Selects an unmasked ternary intrinsic (rs3 is both accumulator source and
// destination) to the inst_kind_LMUL pseudo.
class VPatTernaryNoMask<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew)>;

// Like VPatTernaryNoMask, but the selected pseudo carries a policy operand;
// unmasked selection always passes TAIL_UNDISTURBED.
class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  int sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  RegisterClass op1_reg_class,
                                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew, TAIL_UNDISTURBED)>;

// Masked ternary pattern: forwards rs3, rs1, rs2 and the V0 mask to the
// "_MASK" pseudo.
class VPatTernaryMask<string intrinsic,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      RegisterClass op1_reg_class,
                      DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew)>;
3000
// Patterns for intrinsics that reduce a mask to an XLenVT scalar (plain and
// "_mask" variants), one pair per mask type.
multiclass VPatUnaryS_M<string intrinsic_name,
                             string inst>
{
  foreach mti = AllMasks in {
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
                      (mti.Mask VR:$rs1), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
                      GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
                      (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}

// Patterns for unary ops whose mask operand is an arbitrary register ("VM"
// kind), one per vector type in vtilist.
multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
                           vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass,
                           vti.RegClass>;
  }
}

// Mask-to-mask unary patterns (unmasked and masked) for every mask type.
multiclass VPatUnaryM_M<string intrinsic,
                         string inst>
{
  foreach mti = AllMasks in {
    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
  }
}

// Patterns for unary ops that take a mask source and produce an integer
// vector ("M" kind), for every integer vector type.
multiclass VPatUnaryV_M<string intrinsic, string instruction>
{
  foreach vti = AllIntegerVectors in {
    def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, VR>;
    def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                        vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
  }
}

// Patterns for extension-style unary ops whose source is a fractional-LMUL
// vector (Vti = destination type, Fti = fraction source type).
multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
                         list<VTypeInfoToFraction> fractionList>
{
  foreach vtiTofti = fractionList in
  {
      defvar vti = vtiTofti.Vti;
      defvar fti = vtiTofti.Fti;
      def : VPatUnaryNoMask<intrinsic, instruction, suffix,
                            vti.Vector, fti.Vector,
                            vti.Log2SEW, vti.LMul, fti.RegClass>;
      def : VPatUnaryMaskTA<intrinsic, instruction, suffix,
                            vti.Vector, fti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
   }
}
3060
// Same-type unary patterns ("V" kind), unmasked plus masked-with-policy,
// for every vector type in vtilist.
multiclass VPatUnaryV_V<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryNoMask<intrinsic, instruction, "V",
                          vti.Vector, vti.Vector,
                          vti.Log2SEW, vti.LMul, vti.RegClass>;
    def : VPatUnaryMaskTA<intrinsic, instruction, "V",
                          vti.Vector, vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
  }
}

// Patterns for nullary intrinsics (no data sources beyond VL), e.g. ops
// that only produce a vector; masked form takes a merge operand and V0.
multiclass VPatNullaryV<string intrinsic, string instruction>
{
  foreach vti = AllIntegerVectors in {
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                          VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                          GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                          (vti.Vector vti.RegClass:$merge),
                          (vti.Mask V0), VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                          vti.RegClass:$merge, (vti.Mask V0),
                          GPR:$vl, vti.Log2SEW)>;
  }
}

// Nullary mask-producing patterns, one per mask type.
// NOTE(review): the doubly-nested (XLenVT (XLenVT GPR:$vl)) cast below is
// redundant — a single XLenVT cast (as in VLOpFrag elsewhere) would match
// the same DAG.
multiclass VPatNullaryM<string intrinsic, string inst> {
  foreach mti = AllMasks in
    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
                        (XLenVT (VLOp (XLenVT (XLenVT GPR:$vl)))))),
                        (!cast<Instruction>(inst#"_M_"#mti.BX)
                        GPR:$vl, mti.Log2SEW)>;
}
3096
3097multiclass VPatBinary<string intrinsic,
3098                      string inst,
3099                      ValueType result_type,
3100                      ValueType op1_type,
3101                      ValueType op2_type,
3102                      ValueType mask_type,
3103                      int sew,
3104                      VReg result_reg_class,
3105                      VReg op1_reg_class,
3106                      DAGOperand op2_kind>
3107{
3108  def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
3109                         sew, op1_reg_class, op2_kind>;
3110  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
3111                       mask_type, sew, result_reg_class, op1_reg_class,
3112                       op2_kind>;
3113}
3114
// Same as VPatBinary, but the masked pattern is the tail-agnostic variant
// (VPatBinaryMaskTA).  Most value-producing binary operations use this.
multiclass VPatBinaryTA<string intrinsic,
                        string inst,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int sew,
                        VReg result_reg_class,
                        VReg op1_reg_class,
                        DAGOperand op2_kind>
{
  def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
                         sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                         mask_type, sew, result_reg_class, op1_reg_class,
                         op2_kind>;
}
3132
// Same shape as VPatBinary but built from the *Swapped pattern classes,
// which match the intrinsic with its two source operands in reversed order
// relative to the emitted instruction (used e.g. by VPatBinarySwappedM_VV
// below for comparisons).
multiclass VPatBinarySwapped<string intrinsic,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      VReg result_reg_class,
                      VReg op1_reg_class,
                      DAGOperand op2_kind>
{
  def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                                sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                              mask_type, sew, result_reg_class, op1_reg_class,
                              op2_kind>;
}
3150
// Pattern for binary operations that consume the V0 mask register as a data
// operand (a carry/borrow input) rather than as an execution mask.  Because
// V0 is an ordinary source here, only a single pattern is emitted — there is
// no separate "_MASK" variant and no merge operand.  result_type may be a
// vector or a mask type (see the CarryOut selection in VPatBinaryV_VM etc.).
multiclass VPatBinaryCarryIn<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
3173
// Pattern for binary operations with no mask input at all (no V0 operand and
// no merge).  Callers (VPatBinaryV_V/X/I below) pass a mask type as
// result_type, i.e. these produce a mask result from vector/scalar sources.
multiclass VPatBinaryMaskOut<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         GPR:$vl, sew)>;
}
3194
// Unmasked + masked patterns for a unary conversion intrinsic at one type
// configuration.  The masked form here is the plain VPatUnaryMask (with
// merge operand); VPatConversionTA below uses the tail-agnostic variant.
multiclass VPatConversion<string intrinsic,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op1_type,
                          ValueType mask_type,
                          int sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op1_reg_class>
{
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                        sew, vlmul, op1_reg_class>;
  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type,
                      mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
}

// Same as VPatConversion, but the masked pattern is tail-agnostic
// (VPatUnaryMaskTA).
multiclass VPatConversionTA<string intrinsic,
                            string inst,
                            string kind,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
                            VReg result_reg_class,
                            VReg op1_reg_class>
{
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                        sew, vlmul, op1_reg_class>;
  def : VPatUnaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
}
3228
// Vector-vector form: both sources and the result share the same type info.
multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, vti.Vector,vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.RegClass>;
}

// Vector-vector form where the second source is the integer vector type of
// the same size/LMUL as vti (used when a float operation takes an integer
// index/operand vector, e.g. via GetIntVTypeInfo).
multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar ivti = GetIntVTypeInfo<vti>.Vti;
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.RegClass>;
  }
}
3248
// Vector-vector form where the second (integer) source operand has its own
// element width `eew`, so its register group size EMUL differs from the
// data operand's LMUL.  Only instantiated when the computed EMUL is legal.
multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
                                  int eew, list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // emul = lmul * eew / sew
    defvar vlmul = vti.LMul;
    // Work in eighths (octuple = 8*LMUL) so fractional LMULs stay integral.
    defvar octuple_lmul = vlmul.octuple;
    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
    // 1..64 in eighths corresponds to EMUL in the legal range MF8..M8.
    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
      defvar emul_str = octuple_to_str<octuple_emul>.ret;
      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
      defm : VPatBinaryTA<intrinsic, inst,
                          vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                          vti.Log2SEW, vti.RegClass,
                          vti.RegClass, ivti.RegClass>;
    }
  }
}
3267
// Vector-scalar form: second operand is the element's scalar type, held in
// its scalar register class (GPR or FPR, selected by vti.ScalarSuffix).
multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                        vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.ScalarRegClass>;
  }
}

// Vector-scalar form where the scalar is always XLenVT in a GPR regardless
// of the vector element type (e.g. shift amounts / indices).
multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryTA<intrinsic, instruction # "_VX_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, GPR>;
}

// Vector-immediate form; imm_type selects the immediate operand constraint
// (e.g. simm5 or uimm5).
multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, Operand imm_type> {
  foreach vti = vtilist in
    defm : VPatBinaryTA<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, imm_type>;
}
3296
// Mask-register logical operations: mask-typed sources and result, all held
// in the plain VR register class; only the unmasked pattern exists.
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
  foreach mti = AllMasks in
    def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX,
                           mti.Mask, mti.Mask, mti.Mask,
                           mti.Log2SEW, VR, VR>;
}
3303
// Widening vector-vector form: narrow (Vti) sources produce a wide (Wti)
// result; SEW/mask come from the narrow type.
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, Vti.RegClass>;
  }
}

// Widening vector-scalar form: narrow vector + narrow scalar -> wide result.
multiclass VPatBinaryW_VX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, Vti.ScalarRegClass>;
  }
}
3328
// Widening wide-vector + narrow-vector form (e.g. vw*.wv).  The unmasked and
// masked cases get "tied" patterns where the wide source is also the
// destination; AddedComplexity = 1 makes the tied masked pattern win over the
// generic VPatBinaryMaskTA pattern below it when both could match.
multiclass VPatBinaryW_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Vti.Vector,
                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    let AddedComplexity = 1 in
    def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                             Wti.Vector, Vti.Vector, Vti.Mask,
                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    def : VPatBinaryMaskTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                           Vti.Log2SEW, Wti.RegClass,
                           Wti.RegClass, Vti.RegClass>;
  }
}

// Widening wide-vector + narrow-scalar form (e.g. vw*.wx).
multiclass VPatBinaryW_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "W"#Vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Wti.RegClass, Vti.ScalarRegClass>;
  }
}
3360
// Narrowing wide-vector + narrow-vector form: wide first source, narrow
// second source and narrow result (e.g. vn*.wv).
multiclass VPatBinaryV_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, Vti.RegClass>;
  }
}

// Narrowing wide-vector + scalar form (e.g. vn*.wx).
multiclass VPatBinaryV_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "W"#Vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, Vti.ScalarRegClass>;
  }
}

// Narrowing wide-vector + unsigned 5-bit immediate form (e.g. vn*.wi).
multiclass VPatBinaryV_WI<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, uimm5>;
  }
}
3397
// Carry-in vector-vector form (V0 as carry operand).  With CarryOut = 1 the
// result type becomes the mask type instead of the vector type (carry-out
// producing instructions).
multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Carry-in vector-scalar form; kind string picks X or F scalar flavor.
multiclass VPatBinaryV_XM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction,
                             "V"#vti.ScalarSuffix#"M",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Scalar, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.ScalarRegClass>;
}

// Carry-in vector-immediate (simm5) form; integer vectors only.
multiclass VPatBinaryV_IM<string intrinsic, string instruction,
                          bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, XLenVT, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
3430
// Mask-result forms with no mask/carry input (built on VPatBinaryMaskOut):
// vector-vector variant.
multiclass VPatBinaryV_V<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
                             vti.Mask, vti.Vector, vti.Vector,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Mask-result vector-scalar (GPR) variant.
multiclass VPatBinaryV_X<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, GPR>;
}

// Mask-result vector-immediate (simm5) variant.
multiclass VPatBinaryV_I<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
3454
// Comparison-style forms: vector sources, mask result held in VR.  These use
// plain VPatBinary (non-tail-agnostic masked pattern), unlike the VPatBinaryTA
// based value-producing forms above.
multiclass VPatBinaryM_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                      vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                      vti.Log2SEW, VR,
                      vti.RegClass, vti.RegClass>;
}

// Like VPatBinaryM_VV but matches the intrinsic with reversed source
// operands (via VPatBinarySwapped).
multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, VR,
                             vti.RegClass, vti.RegClass>;
}

// Comparison vector-scalar form (scalar flavor from vti.ScalarSuffix).
multiclass VPatBinaryM_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                      vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, VR,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}

// Comparison vector-immediate (simm5) form.
multiclass VPatBinaryM_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                      vti.Mask, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, VR,
                      vti.RegClass, simm5>;
}
3492
// Convenience multiclasses that bundle the operand-kind variants an
// instruction actually supports (vv/vx/vi, widening v/w forms, carry
// in/out forms).  Each simply inherits from the corresponding single-kind
// multiclasses above.
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;

multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
      VPatBinaryW_WX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI<intrinsic, instruction, vtilist>;

// Carry-in variants: "V" prefix = vector result, "M" prefix = mask
// (carry-out) result, selected via the CarryOut bit.
multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction>,
      VPatBinaryV_XM<intrinsic, instruction>,
      VPatBinaryV_IM<intrinsic, instruction>;

multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>;

multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>,
      VPatBinaryV_I<intrinsic, instruction>;

multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction>,
      VPatBinaryV_XM<intrinsic, instruction>;

multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;

multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>;
3551
// Unmasked + masked patterns for a ternary (three-source, e.g. multiply-add
// style) intrinsic/instruction pair at one type configuration.
multiclass VPatTernary<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       RegisterClass op1_reg_class,
                       DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                          sew, vlmul, result_reg_class, op1_reg_class,
                          op2_kind>;
  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                        op2_kind>;
}

// Same as VPatTernary, but the unmasked pattern carries a tail policy
// operand (VPatTernaryNoMaskWithPolicy); the masked pattern is unchanged.
multiclass VPatTernaryWithPolicy<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 RegisterClass op1_reg_class,
                                 DAGOperand op2_kind> {
  def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
                                    op2_type, sew, vlmul, result_reg_class,
                                    op1_reg_class, op2_kind>;
  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                        op2_kind>;
}
3591
// Ternary vector-vector form ("AAXA" operand ordering: accumulator, scalar
// or vector multiplicand, then vector), with policy-carrying patterns.
multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}

// Ternary vector + XLenVT scalar form (no policy operand).
multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VX",
                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
                       vti.Log2SEW, vti.LMul, vti.RegClass,
                       vti.RegClass, GPR>;
}

// Ternary AAXA form with an element-typed scalar multiplicand (GPR or FPR
// per vti.ScalarSuffix), with policy-carrying patterns.
multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.ScalarRegClass, vti.RegClass>;
}

// Ternary vector + immediate form; Imm_type selects the immediate operand.
multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand Imm_type> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VI",
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.LMul, vti.RegClass,
                      vti.RegClass, Imm_type>;
}
3628
// Widening ternary vector-vector form: wide accumulator/result, narrow
// sources; SEW/mask from the narrow type.
multiclass VPatTernaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 wti.Vector, vti.Vector, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}

// Widening ternary with a narrow element-typed scalar multiplicand.
multiclass VPatTernaryW_VX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 wti.Vector, vti.Scalar, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
  }
}
3653
// Aggregate combinators for the ternary and mask-comparison families; each
// simply concatenates the single-kind multiclasses defined above.
multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
    : VPatTernaryV_VV_AAXA<intrinsic, instruction, vtilist>,
      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;

multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist, Operand Imm_type = simm5>
    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;

multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist>
    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
      VPatTernaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

// For operations whose vv and vx forms are distinct intrinsics; note the VI
// form matches the same "_vx" intrinsic, just with an immediate operand.
multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
3690
// Reduction patterns: the scalar accumulator/result lives in an M1-sized
// vector (VR).  No-group (LMUL <= 1) types look up the matching M1 type by
// SEW; group (LMUL > 1) types already carry their M1 type as gvti.VectorM1.
multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in
  {
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    defm : VPatTernary<intrinsic, instruction, "VS",
                       vectorM1.Vector, vti.Vector,
                       vectorM1.Vector, vti.Mask,
                       vti.Log2SEW, vti.LMul,
                       VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
  {
    defm : VPatTernary<intrinsic, instruction, "VS",
                       gvti.VectorM1, gvti.Vector,
                       gvti.VectorM1, gvti.Mask,
                       gvti.Log2SEW, gvti.LMul,
                       VR, gvti.RegClass, VR>;
  }
}
3710
// Widening reduction patterns: the M1 accumulator/result uses double the
// source SEW.  Skipped when the widened SEW would exceed the 64-bit maximum.
multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in
  {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      defm : VPatTernary<intrinsic, instruction, "VS",
                         wtiM1.Vector, vti.Vector,
                         wtiM1.Vector, vti.Mask,
                         vti.Log2SEW, vti.LMul,
                         wtiM1.RegClass, vti.RegClass,
                         wtiM1.RegClass>;
    }
  }
}
3726
// Float -> integer classification: integer result vector of the same
// size/LMUL as the float source (non-tail-agnostic masked pattern).
multiclass VPatClassifyVI_VF<string intrinsic,
                             string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                          fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}

// Single-width float -> integer conversion (tail-agnostic).
multiclass VPatConversionVI_VF<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                            fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}

// Single-width integer -> float conversion (tail-agnostic).
multiclass VPatConversionVF_VI<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
                            ivti.LMul, fvti.RegClass, ivti.RegClass>;
  }
}
3765
// Widening conversions ("W" result from "V" source); SEW/LMUL of the
// patterns come from the narrow source type.
// Float -> wide integer.
multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                            fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}

// Integer -> wide float.
multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in
  {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
                            vti.LMul, fwti.RegClass, vti.RegClass>;
  }
}

// Float -> wide float.
multiclass VPatConversionWF_VF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
                            fvti.LMul, fwti.RegClass, fvti.RegClass>;
  }
}
3801
// Emit "W"-suffixed narrowing conversion patterns pairing each narrow integer
// type with its widened float type.
multiclass VPatConversionVI_WF<string intrinsic, string instruction> {
  foreach p = AllWidenableIntToFloatVectors in {
    defvar vti = p.Vti;
    defvar fwti = p.Wti;
    defm : VPatConversionTA<intrinsic, instruction, "W",
                            vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                            vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}
3813
// Emit "W"-suffixed narrowing conversion patterns pairing each narrow float
// type with the integer type matching its widened counterpart.
multiclass VPatConversionVF_WI<string intrinsic, string instruction> {
  foreach p = AllWidenableFloatVectors in {
    defvar fvti = p.Vti;
    defvar iwti = GetIntVTypeInfo<p.Wti>.Vti;
    defm : VPatConversionTA<intrinsic, instruction, "W",
                            fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
                            fvti.LMul, fvti.RegClass, iwti.RegClass>;
  }
}
3825
// Emit "W"-suffixed narrowing conversion patterns pairing each narrow float
// type with its widened float type.
multiclass VPatConversionVF_WF<string intrinsic, string instruction> {
  foreach p = AllWidenableFloatVectors in {
    defvar fvti = p.Vti;
    defvar fwti = p.Wti;
    defm : VPatConversionTA<intrinsic, instruction, "W",
                            fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                            fvti.LMul, fvti.RegClass, fwti.RegClass>;
  }
}
3837
// Emit patterns selecting a compare intrinsic with an immediate scalar
// operand to the "_VI" form of `inst`, with the immediate decremented by one
// (DecImm).  NOTE(review): this presumes `inst` applied to (imm - 1) is
// equivalent to `intrinsic` applied to imm (e.g. x < imm  <=>  x <= imm - 1);
// callers must choose `inst` and `ImmType` accordingly.
multiclass VPatCompare_VI<string intrinsic, string inst,
                          ImmLeaf ImmType> {
  foreach vti = AllIntegerVectors in {
    // Unmasked form.
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
                              (vti.Scalar ImmType:$rs2),
                              VLOpFrag)),
              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                      GPR:$vl, vti.Log2SEW)>;
    // Masked form: adds the merge operand and the V0 mask register.
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
                                  (vti.Vector vti.RegClass:$rs1),
                                  (vti.Scalar ImmType:$rs2),
                                  (vti.Mask V0),
                                  VLOpFrag)),
              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                          (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}
3859
3860//===----------------------------------------------------------------------===//
3861// Pseudo instructions
3862//===----------------------------------------------------------------------===//
3863
3864let Predicates = [HasVInstructions] in {
3865
3866//===----------------------------------------------------------------------===//
3867// Pseudo Instructions for CodeGen
3868//===----------------------------------------------------------------------===//
3869let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
3870  def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>;
3871  def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>;
3872  def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>;
3873  def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>;
3874}
3875
3876let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
3877  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
3878                               [(set GPR:$rd, (riscv_read_vlenb))]>;
3879}
3880
3881let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
3882    Uses = [VL] in
3883def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;
3884
3885let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
3886  def PseudoVSPILL_M1 : VPseudo<VS1R_V, V_M1, (outs), (ins VR:$rs1, GPR:$rs2)>;
3887  def PseudoVSPILL_M2 : VPseudo<VS2R_V, V_M2, (outs), (ins VRM2:$rs1, GPR:$rs2)>;
3888  def PseudoVSPILL_M4 : VPseudo<VS4R_V, V_M4, (outs), (ins VRM4:$rs1, GPR:$rs2)>;
3889  def PseudoVSPILL_M8 : VPseudo<VS8R_V, V_M8, (outs), (ins VRM8:$rs1, GPR:$rs2)>;
3890}
3891
3892let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in {
3893  def PseudoVRELOAD_M1 : VPseudo<VL1RE8_V, V_M1, (outs VR:$rs1), (ins GPR:$rs2)>;
3894  def PseudoVRELOAD_M2 : VPseudo<VL2RE8_V, V_M2, (outs VRM2:$rs1), (ins GPR:$rs2)>;
3895  def PseudoVRELOAD_M4 : VPseudo<VL4RE8_V, V_M4, (outs VRM4:$rs1), (ins GPR:$rs2)>;
3896  def PseudoVRELOAD_M8 : VPseudo<VL8RE8_V, V_M8, (outs VRM8:$rs1), (ins GPR:$rs2)>;
3897}
3898
3899foreach lmul = MxList in {
3900  foreach nf = NFSet<lmul>.L in {
3901    defvar vreg = SegRegClass<lmul, nf>.RC;
3902    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
3903        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
3904      def "PseudoVSPILL" # nf # "_" # lmul.MX :
3905        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2, GPR:$vlenb), []>;
3906    }
3907    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
3908        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
3909      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
3910        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2, GPR:$vlenb), []>;
3911    }
3912  }
3913}
3914
3915//===----------------------------------------------------------------------===//
3916// 6. Configuration-Setting Instructions
3917//===----------------------------------------------------------------------===//
3918
3919// Pseudos.
3920let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
3921// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
3922// the when we aren't using one of the special X0 encodings. Otherwise it could
3923// be accidentally be made X0 by MachineIR optimizations. To satisfy the
3924// verifier, we also need a GPRX0 instruction for the special encodings.
3925def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>;
3926def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>;
3927def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>;
3928}
3929
3930//===----------------------------------------------------------------------===//
3931// 7. Vector Loads and Stores
3932//===----------------------------------------------------------------------===//
3933
3934//===----------------------------------------------------------------------===//
3935// 7.4 Vector Unit-Stride Instructions
3936//===----------------------------------------------------------------------===//
3937
// Pseudos Unit-Stride Loads and Stores
defm PseudoVL : VPseudoUSLoad;
defm PseudoVS : VPseudoUSStore;

// Mask load/store pseudos (vlm.v/vsm.v), with scheduling info attached here.
defm PseudoVLM : VPseudoLoadMask,
                 Sched<[WriteVLDM, ReadVLDX]>;
defm PseudoVSM : VPseudoStoreMask,
                 Sched<[WriteVSTM, ReadVSTX]>;

//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//

// Vector Strided Loads and Stores
defm PseudoVLS : VPseudoSLoad;
defm PseudoVSS : VPseudoSStore;

//===----------------------------------------------------------------------===//
// 7.6 Vector Indexed Instructions
//===----------------------------------------------------------------------===//

// Vector Indexed Loads and Stores (unordered VLUX/VSUX, ordered VLOX/VSOX).
defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>;
defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>;
defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>;
defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>;

//===----------------------------------------------------------------------===//
// 7.7. Unit-stride Fault-Only-First Loads
//===----------------------------------------------------------------------===//

// vleff may update VL register, hence Defs = [VL] and hasSideEffects = 1.
let hasSideEffects = 1, Defs = [VL] in
defm PseudoVL : VPseudoFFLoad;

//===----------------------------------------------------------------------===//
// 7.8. Vector Load/Store Segment Instructions
//===----------------------------------------------------------------------===//
defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/false>;
defm PseudoVLSSEG : VPseudoSSegLoad;
defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>;
defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>;
defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;

// vlseg<nf>e<eew>ff.v may update VL register, same treatment as vleff.
let hasSideEffects = 1, Defs = [VL] in
defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/true>;
3988
3989//===----------------------------------------------------------------------===//
3990// 12. Vector Integer Arithmetic Instructions
3991//===----------------------------------------------------------------------===//
3992
3993//===----------------------------------------------------------------------===//
3994// 12.1. Vector Single-Width Integer Add and Subtract
3995//===----------------------------------------------------------------------===//
3996defm PseudoVADD   : VPseudoVALU_VV_VX_VI;
3997defm PseudoVSUB   : VPseudoVALU_VV_VX;
3998defm PseudoVRSUB  : VPseudoVALU_VX_VI;
3999
4000foreach vti = AllIntegerVectors in {
4001  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
4002  // Occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
4003  // to use a more complex splat sequence. Add the pattern for all VTs for
4004  // consistency.
4005  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$rs2),
4006                                         (vti.Vector vti.RegClass:$rs1),
4007                                         VLOpFrag)),
4008            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
4009                                                              vti.RegClass:$rs2,
4010                                                              GPR:$vl,
4011                                                              vti.Log2SEW)>;
4012  def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
4013                                              (vti.Vector vti.RegClass:$rs2),
4014                                              (vti.Vector vti.RegClass:$rs1),
4015                                              (vti.Mask V0),
4016                                              VLOpFrag,
4017                                              (XLenVT timm:$policy))),
4018            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
4019                                                      vti.RegClass:$merge,
4020                                                      vti.RegClass:$rs1,
4021                                                      vti.RegClass:$rs2,
4022                                                      (vti.Mask V0),
4023                                                      GPR:$vl,
4024                                                      vti.Log2SEW,
4025                                                      (XLenVT timm:$policy))>;
4026
4027  // Match VSUB with a small immediate to vadd.vi by negating the immediate.
4028  def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
4029                                        (vti.Scalar simm5_plus1:$rs2),
4030                                        VLOpFrag)),
4031            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
4032                                                              (NegImm simm5_plus1:$rs2),
4033                                                              GPR:$vl,
4034                                                              vti.Log2SEW)>;
4035  def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
4036                                             (vti.Vector vti.RegClass:$rs1),
4037                                             (vti.Scalar simm5_plus1:$rs2),
4038                                             (vti.Mask V0),
4039                                             VLOpFrag,
4040                                             (XLenVT timm:$policy))),
4041            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
4042                                                      vti.RegClass:$merge,
4043                                                      vti.RegClass:$rs1,
4044                                                      (NegImm simm5_plus1:$rs2),
4045                                                      (vti.Mask V0),
4046                                                      GPR:$vl,
4047                                                      vti.Log2SEW,
4048                                                      (XLenVT timm:$policy))>;
4049}
4050
4051//===----------------------------------------------------------------------===//
4052// 12.2. Vector Widening Integer Add/Subtract
4053//===----------------------------------------------------------------------===//
4054defm PseudoVWADDU : VPseudoVWALU_VV_VX;
4055defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
4056defm PseudoVWADD  : VPseudoVWALU_VV_VX;
4057defm PseudoVWSUB  : VPseudoVWALU_VV_VX;
4058defm PseudoVWADDU : VPseudoVWALU_WV_WX;
4059defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
4060defm PseudoVWADD  : VPseudoVWALU_WV_WX;
4061defm PseudoVWSUB  : VPseudoVWALU_WV_WX;
4062
4063//===----------------------------------------------------------------------===//
4064// 12.3. Vector Integer Extension
4065//===----------------------------------------------------------------------===//
4066defm PseudoVZEXT_VF2 : PseudoVEXT_VF2;
4067defm PseudoVZEXT_VF4 : PseudoVEXT_VF4;
4068defm PseudoVZEXT_VF8 : PseudoVEXT_VF8;
4069defm PseudoVSEXT_VF2 : PseudoVEXT_VF2;
4070defm PseudoVSEXT_VF4 : PseudoVEXT_VF4;
4071defm PseudoVSEXT_VF8 : PseudoVEXT_VF8;
4072
4073//===----------------------------------------------------------------------===//
4074// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
4075//===----------------------------------------------------------------------===//
4076defm PseudoVADC  : VPseudoVCALU_VM_XM_IM;
4077defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd">;
4078defm PseudoVMADC : VPseudoVCALUM_V_X_I<"@earlyclobber $rd">;
4079
4080defm PseudoVSBC  : VPseudoVCALU_VM_XM;
4081defm PseudoVMSBC : VPseudoVCALUM_VM_XM<"@earlyclobber $rd">;
4082defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd">;
4083
4084//===----------------------------------------------------------------------===//
4085// 12.5. Vector Bitwise Logical Instructions
4086//===----------------------------------------------------------------------===//
4087defm PseudoVAND : VPseudoVALU_VV_VX_VI;
4088defm PseudoVOR  : VPseudoVALU_VV_VX_VI;
4089defm PseudoVXOR : VPseudoVALU_VV_VX_VI;
4090
4091//===----------------------------------------------------------------------===//
4092// 12.6. Vector Single-Width Bit Shift Instructions
4093//===----------------------------------------------------------------------===//
4094defm PseudoVSLL : VPseudoVSHT_VV_VX_VI<uimm5>;
4095defm PseudoVSRL : VPseudoVSHT_VV_VX_VI<uimm5>;
4096defm PseudoVSRA : VPseudoVSHT_VV_VX_VI<uimm5>;
4097
4098//===----------------------------------------------------------------------===//
4099// 12.7. Vector Narrowing Integer Right Shift Instructions
4100//===----------------------------------------------------------------------===//
4101defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI;
4102defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;
4103
4104//===----------------------------------------------------------------------===//
4105// 12.8. Vector Integer Comparison Instructions
4106//===----------------------------------------------------------------------===//
4107defm PseudoVMSEQ  : VPseudoVCMPM_VV_VX_VI;
4108defm PseudoVMSNE  : VPseudoVCMPM_VV_VX_VI;
4109defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
4110defm PseudoVMSLT  : VPseudoVCMPM_VV_VX;
4111defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
4112defm PseudoVMSLE  : VPseudoVCMPM_VV_VX_VI;
4113defm PseudoVMSGTU : VPseudoVCMPM_VX_VI;
4114defm PseudoVMSGT  : VPseudoVCMPM_VX_VI;
4115
4116//===----------------------------------------------------------------------===//
4117// 12.9. Vector Integer Min/Max Instructions
4118//===----------------------------------------------------------------------===//
4119defm PseudoVMINU : VPseudoVMINMAX_VV_VX;
4120defm PseudoVMIN  : VPseudoVMINMAX_VV_VX;
4121defm PseudoVMAXU : VPseudoVMINMAX_VV_VX;
4122defm PseudoVMAX  : VPseudoVMINMAX_VV_VX;
4123
4124//===----------------------------------------------------------------------===//
4125// 12.10. Vector Single-Width Integer Multiply Instructions
4126//===----------------------------------------------------------------------===//
4127defm PseudoVMUL    : VPseudoVMUL_VV_VX;
4128defm PseudoVMULH   : VPseudoVMUL_VV_VX;
4129defm PseudoVMULHU  : VPseudoVMUL_VV_VX;
4130defm PseudoVMULHSU : VPseudoVMUL_VV_VX;
4131
4132//===----------------------------------------------------------------------===//
4133// 12.11. Vector Integer Divide Instructions
4134//===----------------------------------------------------------------------===//
4135defm PseudoVDIVU : VPseudoVDIV_VV_VX;
4136defm PseudoVDIV  : VPseudoVDIV_VV_VX;
4137defm PseudoVREMU : VPseudoVDIV_VV_VX;
4138defm PseudoVREM  : VPseudoVDIV_VV_VX;
4139
4140//===----------------------------------------------------------------------===//
4141// 12.12. Vector Widening Integer Multiply Instructions
4142//===----------------------------------------------------------------------===//
4143defm PseudoVWMUL   : VPseudoVWMUL_VV_VX;
4144defm PseudoVWMULU  : VPseudoVWMUL_VV_VX;
4145defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;
4146
4147//===----------------------------------------------------------------------===//
4148// 12.13. Vector Single-Width Integer Multiply-Add Instructions
4149//===----------------------------------------------------------------------===//
4150defm PseudoVMACC  : VPseudoVMAC_VV_VX_AAXA;
4151defm PseudoVNMSAC : VPseudoVMAC_VV_VX_AAXA;
4152defm PseudoVMADD  : VPseudoVMAC_VV_VX_AAXA;
4153defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;
4154
4155//===----------------------------------------------------------------------===//
4156// 12.14. Vector Widening Integer Multiply-Add Instructions
4157//===----------------------------------------------------------------------===//
4158defm PseudoVWMACCU  : VPseudoVWMAC_VV_VX;
4159defm PseudoVWMACC   : VPseudoVWMAC_VV_VX;
4160defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
4161defm PseudoVWMACCUS : VPseudoVWMAC_VX;
4162
4163//===----------------------------------------------------------------------===//
4164// 12.15. Vector Integer Merge Instructions
4165//===----------------------------------------------------------------------===//
4166defm PseudoVMERGE : VPseudoVMRG_VM_XM_IM;
4167
4168//===----------------------------------------------------------------------===//
4169// 12.16. Vector Integer Move Instructions
4170//===----------------------------------------------------------------------===//
4171defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;
4172
4173//===----------------------------------------------------------------------===//
4174// 13.1. Vector Single-Width Saturating Add and Subtract
4175//===----------------------------------------------------------------------===//
4176let Defs = [VXSAT], hasSideEffects = 1 in {
4177  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI;
4178  defm PseudoVSADD  : VPseudoVSALU_VV_VX_VI;
4179  defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
4180  defm PseudoVSSUB  : VPseudoVSALU_VV_VX;
4181}
4182
4183//===----------------------------------------------------------------------===//
4184// 13.2. Vector Single-Width Averaging Add and Subtract
4185//===----------------------------------------------------------------------===//
4186let Uses = [VXRM], hasSideEffects = 1 in {
4187  defm PseudoVAADDU : VPseudoVAALU_VV_VX;
4188  defm PseudoVAADD  : VPseudoVAALU_VV_VX;
4189  defm PseudoVASUBU : VPseudoVAALU_VV_VX;
4190  defm PseudoVASUB  : VPseudoVAALU_VV_VX;
4191}
4192
4193//===----------------------------------------------------------------------===//
4194// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
4195//===----------------------------------------------------------------------===//
4196let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
4197  defm PseudoVSMUL : VPseudoVSMUL_VV_VX;
4198}
4199
4200//===----------------------------------------------------------------------===//
4201// 13.4. Vector Single-Width Scaling Shift Instructions
4202//===----------------------------------------------------------------------===//
4203let Uses = [VXRM], hasSideEffects = 1 in {
4204  defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI<uimm5>;
4205  defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI<uimm5>;
4206}
4207
4208//===----------------------------------------------------------------------===//
4209// 13.5. Vector Narrowing Fixed-Point Clip Instructions
4210//===----------------------------------------------------------------------===//
4211let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
4212  defm PseudoVNCLIP  : VPseudoVNCLP_WV_WX_WI;
4213  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI;
4214}
4215
4216} // Predicates = [HasVInstructions]
4217
4218let Predicates = [HasVInstructionsAnyF] in {
4219//===----------------------------------------------------------------------===//
4220// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
4221//===----------------------------------------------------------------------===//
4222defm PseudoVFADD  : VPseudoVALU_VV_VF;
4223defm PseudoVFSUB  : VPseudoVALU_VV_VF;
4224defm PseudoVFRSUB : VPseudoVALU_VF;
4225
4226//===----------------------------------------------------------------------===//
4227// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
4228//===----------------------------------------------------------------------===//
4229defm PseudoVFWADD : VPseudoVFWALU_VV_VF;
4230defm PseudoVFWSUB : VPseudoVFWALU_VV_VF;
4231defm PseudoVFWADD : VPseudoVFWALU_WV_WF;
4232defm PseudoVFWSUB : VPseudoVFWALU_WV_WF;
4233
4234//===----------------------------------------------------------------------===//
4235// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
4236//===----------------------------------------------------------------------===//
4237defm PseudoVFMUL  : VPseudoVFMUL_VV_VF;
4238defm PseudoVFDIV  : VPseudoVFDIV_VV_VF;
4239defm PseudoVFRDIV : VPseudoVFRDIV_VF;
4240
4241//===----------------------------------------------------------------------===//
4242// 14.5. Vector Widening Floating-Point Multiply
4243//===----------------------------------------------------------------------===//
4244defm PseudoVFWMUL : VPseudoVWMUL_VV_VF;
4245
4246//===----------------------------------------------------------------------===//
4247// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
4248//===----------------------------------------------------------------------===//
4249defm PseudoVFMACC  : VPseudoVMAC_VV_VF_AAXA;
4250defm PseudoVFNMACC : VPseudoVMAC_VV_VF_AAXA;
4251defm PseudoVFMSAC  : VPseudoVMAC_VV_VF_AAXA;
4252defm PseudoVFNMSAC : VPseudoVMAC_VV_VF_AAXA;
4253defm PseudoVFMADD  : VPseudoVMAC_VV_VF_AAXA;
4254defm PseudoVFNMADD : VPseudoVMAC_VV_VF_AAXA;
4255defm PseudoVFMSUB  : VPseudoVMAC_VV_VF_AAXA;
4256defm PseudoVFNMSUB : VPseudoVMAC_VV_VF_AAXA;
4257
4258//===----------------------------------------------------------------------===//
4259// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
4260//===----------------------------------------------------------------------===//
4261defm PseudoVFWMACC  : VPseudoVWMAC_VV_VF;
4262defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF;
4263defm PseudoVFWMSAC  : VPseudoVWMAC_VV_VF;
4264defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF;
4265
4266//===----------------------------------------------------------------------===//
4267// 14.8. Vector Floating-Point Square-Root Instruction
4268//===----------------------------------------------------------------------===//
4269defm PseudoVFSQRT : VPseudoVSQR_V;
4270
4271//===----------------------------------------------------------------------===//
4272// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
4273//===----------------------------------------------------------------------===//
4274defm PseudoVFRSQRT7 : VPseudoVRCP_V;
4275
4276//===----------------------------------------------------------------------===//
4277// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
4278//===----------------------------------------------------------------------===//
4279defm PseudoVFREC7 : VPseudoVRCP_V;
4280
4281//===----------------------------------------------------------------------===//
4282// 14.11. Vector Floating-Point Min/Max Instructions
4283//===----------------------------------------------------------------------===//
4284defm PseudoVFMIN : VPseudoVMAX_VV_VF;
4285defm PseudoVFMAX : VPseudoVMAX_VV_VF;
4286
4287//===----------------------------------------------------------------------===//
4288// 14.12. Vector Floating-Point Sign-Injection Instructions
4289//===----------------------------------------------------------------------===//
4290defm PseudoVFSGNJ  : VPseudoVSGNJ_VV_VF;
4291defm PseudoVFSGNJN : VPseudoVSGNJ_VV_VF;
4292defm PseudoVFSGNJX : VPseudoVSGNJ_VV_VF;
4293
4294//===----------------------------------------------------------------------===//
4295// 14.13. Vector Floating-Point Compare Instructions
4296//===----------------------------------------------------------------------===//
4297defm PseudoVMFEQ : VPseudoVCMPM_VV_VF;
4298defm PseudoVMFNE : VPseudoVCMPM_VV_VF;
4299defm PseudoVMFLT : VPseudoVCMPM_VV_VF;
4300defm PseudoVMFLE : VPseudoVCMPM_VV_VF;
4301defm PseudoVMFGT : VPseudoVCMPM_VF;
4302defm PseudoVMFGE : VPseudoVCMPM_VF;
4303
4304//===----------------------------------------------------------------------===//
4305// 14.14. Vector Floating-Point Classify Instruction
4306//===----------------------------------------------------------------------===//
4307defm PseudoVFCLASS : VPseudoVCLS_V;
4308
4309//===----------------------------------------------------------------------===//
4310// 14.15. Vector Floating-Point Merge Instruction
4311//===----------------------------------------------------------------------===//
4312defm PseudoVFMERGE : VPseudoVMRG_FM;
4313
4314//===----------------------------------------------------------------------===//
4315// 14.16. Vector Floating-Point Move Instruction
4316//===----------------------------------------------------------------------===//
4317defm PseudoVFMV_V : VPseudoVMV_F;
4318
4319//===----------------------------------------------------------------------===//
4320// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
4321//===----------------------------------------------------------------------===//
4322defm PseudoVFCVT_XU_F : VPseudoVCVTI_V;
4323defm PseudoVFCVT_X_F : VPseudoVCVTI_V;
4324defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
4325defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;
4326defm PseudoVFCVT_F_XU : VPseudoVCVTF_V;
4327defm PseudoVFCVT_F_X : VPseudoVCVTF_V;
4328
4329//===----------------------------------------------------------------------===//
4330// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
4331//===----------------------------------------------------------------------===//
4332defm PseudoVFWCVT_XU_F     : VPseudoVWCVTI_V;
4333defm PseudoVFWCVT_X_F      : VPseudoVWCVTI_V;
4334defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
4335defm PseudoVFWCVT_RTZ_X_F  : VPseudoVWCVTI_V;
4336defm PseudoVFWCVT_F_XU     : VPseudoVWCVTF_V;
4337defm PseudoVFWCVT_F_X      : VPseudoVWCVTF_V;
4338defm PseudoVFWCVT_F_F      : VPseudoVWCVTD_V;
4339
4340//===----------------------------------------------------------------------===//
4341// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
4342//===----------------------------------------------------------------------===//
4343defm PseudoVFNCVT_XU_F     : VPseudoVNCVTI_W;
4344defm PseudoVFNCVT_X_F      : VPseudoVNCVTI_W;
4345defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
4346defm PseudoVFNCVT_RTZ_X_F  : VPseudoVNCVTI_W;
4347defm PseudoVFNCVT_F_XU     : VPseudoVNCVTF_W;
4348defm PseudoVFNCVT_F_X      : VPseudoVNCVTF_W;
4349defm PseudoVFNCVT_F_F      : VPseudoVNCVTD_W;
4350defm PseudoVFNCVT_ROD_F_F  : VPseudoVNCVTD_W;
4351} // Predicates = [HasVInstructionsAnyF]
4352
4353let Predicates = [HasVInstructions] in {
4354//===----------------------------------------------------------------------===//
4355// 15.1. Vector Single-Width Integer Reduction Instructions
4356//===----------------------------------------------------------------------===//
4357defm PseudoVREDSUM  : VPseudoVRED_VS;
4358defm PseudoVREDAND  : VPseudoVRED_VS;
4359defm PseudoVREDOR   : VPseudoVRED_VS;
4360defm PseudoVREDXOR  : VPseudoVRED_VS;
4361defm PseudoVREDMINU : VPseudoVRED_VS;
4362defm PseudoVREDMIN  : VPseudoVRED_VS;
4363defm PseudoVREDMAXU : VPseudoVRED_VS;
4364defm PseudoVREDMAX  : VPseudoVRED_VS;
4365
4366//===----------------------------------------------------------------------===//
4367// 15.2. Vector Widening Integer Reduction Instructions
4368//===----------------------------------------------------------------------===//
4369let IsRVVWideningReduction = 1 in {
4370defm PseudoVWREDSUMU   : VPseudoVWRED_VS;
4371defm PseudoVWREDSUM    : VPseudoVWRED_VS;
4372}
4373} // Predicates = [HasVInstructions]
4374
4375let Predicates = [HasVInstructionsAnyF] in {
4376//===----------------------------------------------------------------------===//
4377// 15.3. Vector Single-Width Floating-Point Reduction Instructions
4378//===----------------------------------------------------------------------===//
4379defm PseudoVFREDOSUM : VPseudoVFREDO_VS;
4380defm PseudoVFREDUSUM : VPseudoVFRED_VS;
4381defm PseudoVFREDMIN  : VPseudoVFRED_VS;
4382defm PseudoVFREDMAX  : VPseudoVFRED_VS;
4383
4384//===----------------------------------------------------------------------===//
4385// 15.4. Vector Widening Floating-Point Reduction Instructions
4386//===----------------------------------------------------------------------===//
4387let IsRVVWideningReduction = 1 in {
4388defm PseudoVFWREDUSUM  : VPseudoVFWRED_VS;
4389defm PseudoVFWREDOSUM  : VPseudoVFWRED_VS;
4390}
4391
4392} // Predicates = [HasVInstructionsAnyF]
4393
4394//===----------------------------------------------------------------------===//
4395// 16. Vector Mask Instructions
4396//===----------------------------------------------------------------------===//
4397
4398//===----------------------------------------------------------------------===//
4399// 16.1 Vector Mask-Register Logical Instructions
4400//===----------------------------------------------------------------------===//
4401
4402defm PseudoVMAND: VPseudoVALU_MM;
4403defm PseudoVMNAND: VPseudoVALU_MM;
4404defm PseudoVMANDN: VPseudoVALU_MM;
4405defm PseudoVMXOR: VPseudoVALU_MM;
4406defm PseudoVMOR: VPseudoVALU_MM;
4407defm PseudoVMNOR: VPseudoVALU_MM;
4408defm PseudoVMORN: VPseudoVALU_MM;
4409defm PseudoVMXNOR: VPseudoVALU_MM;
4410
4411// Pseudo instructions
4412defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">,
4413                   Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
4414defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">,
4415                   Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
4416
4417//===----------------------------------------------------------------------===//
4418// 16.2. Vector mask population count vcpop
4419//===----------------------------------------------------------------------===//
4420
4421defm PseudoVCPOP: VPseudoVPOP_M;
4422
4423//===----------------------------------------------------------------------===//
4424// 16.3. vfirst find-first-set mask bit
4425//===----------------------------------------------------------------------===//
4426
4427defm PseudoVFIRST: VPseudoV1ST_M;
4428
4429//===----------------------------------------------------------------------===//
4430// 16.4. vmsbf.m set-before-first mask bit
4431//===----------------------------------------------------------------------===//
4432defm PseudoVMSBF: VPseudoVSFS_M;
4433
4434//===----------------------------------------------------------------------===//
4435// 16.5. vmsif.m set-including-first mask bit
4436//===----------------------------------------------------------------------===//
4437defm PseudoVMSIF: VPseudoVSFS_M;
4438
4439//===----------------------------------------------------------------------===//
4440// 16.6. vmsof.m set-only-first mask bit
4441//===----------------------------------------------------------------------===//
4442defm PseudoVMSOF: VPseudoVSFS_M;
4443
4444//===----------------------------------------------------------------------===//
4445// 16.8.  Vector Iota Instruction
4446//===----------------------------------------------------------------------===//
4447defm PseudoVIOTA_M: VPseudoVIOT_M;
4448
4449//===----------------------------------------------------------------------===//
4450// 16.9. Vector Element Index Instruction
4451//===----------------------------------------------------------------------===//
4452defm PseudoVID : VPseudoVID_V;
4453
4454//===----------------------------------------------------------------------===//
4455// 17. Vector Permutation Instructions
4456//===----------------------------------------------------------------------===//
4457
4458//===----------------------------------------------------------------------===//
4459// 17.1. Integer Scalar Move Instructions
4460//===----------------------------------------------------------------------===//
4461
4462let Predicates = [HasVInstructions] in {
4463let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
4464  foreach m = MxList in {
4465    let VLMul = m.value in {
4466      let HasSEWOp = 1, BaseInstr = VMV_X_S in
4467      def PseudoVMV_X_S # "_" # m.MX:
4468        Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
4469        Sched<[WriteVIMovVX, ReadVIMovVX]>,
4470        RISCVVPseudo;
4471      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
4472          Constraints = "$rd = $rs1" in
4473      def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
4474                                             (ins m.vrclass:$rs1, GPR:$rs2,
4475                                                  AVL:$vl, ixlenimm:$sew),
4476                                             []>,
4477        Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>,
4478        RISCVVPseudo;
4479    }
4480  }
4481}
4482} // Predicates = [HasVInstructions]
4483
4484//===----------------------------------------------------------------------===//
4485// 17.2. Floating-Point Scalar Move Instructions
4486//===----------------------------------------------------------------------===//
4487
4488let Predicates = [HasVInstructionsAnyF] in {
4489let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
4490  foreach f = FPList in {
4491    foreach m = f.MxList in {
4492      let VLMul = m.value in {
4493        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
4494        def "PseudoVFMV_" # f.FX # "_S_" # m.MX :
4495          Pseudo<(outs f.fprclass:$rd),
4496                 (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
4497          Sched<[WriteVFMovVF, ReadVFMovVF]>,
4498          RISCVVPseudo;
4499        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
4500            Constraints = "$rd = $rs1" in
4501        def "PseudoVFMV_S_" # f.FX # "_" # m.MX :
4502                                          Pseudo<(outs m.vrclass:$rd),
4503                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
4504                                                      AVL:$vl, ixlenimm:$sew),
4505                                                 []>,
4506          Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>,
4507          RISCVVPseudo;
4508      }
4509    }
4510  }
4511}
4512} // Predicates = [HasVInstructionsAnyF]
4513
4514//===----------------------------------------------------------------------===//
4515// 17.3. Vector Slide Instructions
4516//===----------------------------------------------------------------------===//
4517let Predicates = [HasVInstructions] in {
4518  defm PseudoVSLIDEUP    : VPseudoVSLD_VX_VI<uimm5, "@earlyclobber $rd">;
4519  defm PseudoVSLIDEDOWN  : VPseudoVSLD_VX_VI<uimm5>;
4520  defm PseudoVSLIDE1UP   : VPseudoVSLD1_VX<"@earlyclobber $rd">;
4521  defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
4522} // Predicates = [HasVInstructions]
4523
4524let Predicates = [HasVInstructionsAnyF] in {
4525  defm PseudoVFSLIDE1UP  : VPseudoVSLD1_VF<"@earlyclobber $rd">;
4526  defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
4527} // Predicates = [HasVInstructionsAnyF]
4528
4529//===----------------------------------------------------------------------===//
4530// 17.4. Vector Register Gather Instructions
4531//===----------------------------------------------------------------------===//
4532defm PseudoVRGATHER     : VPseudoVGTR_VV_VX_VI<uimm5, "@earlyclobber $rd">;
4533defm PseudoVRGATHEREI16 : VPseudoVGTR_VV_EEW</* eew */ 16, "@earlyclobber $rd">;
4534
4535//===----------------------------------------------------------------------===//
4536// 17.5. Vector Compress Instruction
4537//===----------------------------------------------------------------------===//
4538defm PseudoVCOMPRESS : VPseudoVCPR_V;
4539
4540//===----------------------------------------------------------------------===//
4541// Patterns.
4542//===----------------------------------------------------------------------===//
4543
4544//===----------------------------------------------------------------------===//
4545// 12. Vector Integer Arithmetic Instructions
4546//===----------------------------------------------------------------------===//
4547
4548let Predicates = [HasVInstructions] in {
4549//===----------------------------------------------------------------------===//
4550// 12.1. Vector Single-Width Integer Add and Subtract
4551//===----------------------------------------------------------------------===//
4552defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
4553defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
4554defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
4555
4556//===----------------------------------------------------------------------===//
4557// 12.2. Vector Widening Integer Add/Subtract
4558//===----------------------------------------------------------------------===//
4559defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
4560defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
4561defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
4562defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
4563defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
4564defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
4565defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
4566defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;
4567
4568//===----------------------------------------------------------------------===//
4569// 12.3. Vector Integer Extension
4570//===----------------------------------------------------------------------===//
4571defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
4572                     AllFractionableVF2IntVectors>;
4573defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
4574                     AllFractionableVF4IntVectors>;
4575defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
4576                     AllFractionableVF8IntVectors>;
4577defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
4578                     AllFractionableVF2IntVectors>;
4579defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
4580                     AllFractionableVF4IntVectors>;
4581defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
4582                     AllFractionableVF8IntVectors>;
4583
4584//===----------------------------------------------------------------------===//
4585// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
4586//===----------------------------------------------------------------------===//
4587defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
4588defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
4589defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;
4590
4591defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
4592defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
4593defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
4594
4595//===----------------------------------------------------------------------===//
4596// 12.5. Vector Bitwise Logical Instructions
4597//===----------------------------------------------------------------------===//
4598defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
4599defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
4600defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;
4601
4602//===----------------------------------------------------------------------===//
4603// 12.6. Vector Single-Width Bit Shift Instructions
4604//===----------------------------------------------------------------------===//
4605defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
4606                            uimm5>;
4607defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
4608                            uimm5>;
4609defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
4610                            uimm5>;
4611
4612foreach vti = AllIntegerVectors in {
4613  // Emit shift by 1 as an add since it might be faster.
4614  def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1),
4615                                        (XLenVT 1), VLOpFrag)),
4616            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
4617                                                              vti.RegClass:$rs1,
4618                                                              GPR:$vl,
4619                                                              vti.Log2SEW)>;
4620  def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
4621                                             (vti.Vector vti.RegClass:$rs1),
4622                                             (XLenVT 1),
4623                                             (vti.Mask V0),
4624                                             VLOpFrag,
4625                                             (XLenVT timm:$policy))),
4626            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
4627                                                        vti.RegClass:$merge,
4628                                                        vti.RegClass:$rs1,
4629                                                        vti.RegClass:$rs1,
4630                                                        (vti.Mask V0),
4631                                                        GPR:$vl,
4632                                                        vti.Log2SEW,
4633                                                        (XLenVT timm:$policy))>;
4634}
4635
4636//===----------------------------------------------------------------------===//
4637// 12.7. Vector Narrowing Integer Right Shift Instructions
4638//===----------------------------------------------------------------------===//
4639defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
4640defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
4641
4642//===----------------------------------------------------------------------===//
4643// 12.8. Vector Integer Comparison Instructions
4644//===----------------------------------------------------------------------===//
4645defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
4646defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
4647defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
4648defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
4649defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
4650defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;
4651
4652defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
4653defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
4654
4655// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
4656defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
4657defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
4658
4659defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
4660defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
4661
4662// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16 and
4663// non-zero. Zero can be .vx with x0. This avoids the user needing to know that
4664// there is no vmslt(u).vi instruction. Similar for vmsge(u).vx intrinsics
4665// using vmslt(u).vi.
4666defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE", simm5_plus1_nonzero>;
4667defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
4668
4669// We need to handle 0 for vmsge.vi using vmslt.vi because there is no vmsge.vx.
4670defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT", simm5_plus1>;
4671defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
4672
4673//===----------------------------------------------------------------------===//
4674// 12.9. Vector Integer Min/Max Instructions
4675//===----------------------------------------------------------------------===//
4676defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
4677defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
4678defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
4679defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
4680
4681//===----------------------------------------------------------------------===//
4682// 12.10. Vector Single-Width Integer Multiply Instructions
4683//===----------------------------------------------------------------------===//
4684defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
4685defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>;
4686defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>;
4687defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>;
4688
4689//===----------------------------------------------------------------------===//
4690// 12.11. Vector Integer Divide Instructions
4691//===----------------------------------------------------------------------===//
4692defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>;
4693defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>;
4694defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>;
4695defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;
4696
4697//===----------------------------------------------------------------------===//
4698// 12.12. Vector Widening Integer Multiply Instructions
4699//===----------------------------------------------------------------------===//
4700defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
4701defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
4702defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
4703
4704//===----------------------------------------------------------------------===//
4705// 12.13. Vector Single-Width Integer Multiply-Add Instructions
4706//===----------------------------------------------------------------------===//
4707defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
4708defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
4709defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
4710defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
4711
4712//===----------------------------------------------------------------------===//
4713// 12.14. Vector Widening Integer Multiply-Add Instructions
4714//===----------------------------------------------------------------------===//
4715defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
4716defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
4717defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
4718defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
4719
4720//===----------------------------------------------------------------------===//
4721// 12.15. Vector Integer Merge Instructions
4722//===----------------------------------------------------------------------===//
4723defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
4724
4725//===----------------------------------------------------------------------===//
4726// 12.16. Vector Integer Move Instructions
4727//===----------------------------------------------------------------------===//
4728foreach vti = AllVectors in {
4729  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
4730                                           VLOpFrag)),
4731            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
4732             $rs1, GPR:$vl, vti.Log2SEW)>;
4733
4734  // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
4735}
4736
4737//===----------------------------------------------------------------------===//
4738// 13.1. Vector Single-Width Saturating Add and Subtract
4739//===----------------------------------------------------------------------===//
4740defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
4741defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
4742defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
4743defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
4744
4745//===----------------------------------------------------------------------===//
4746// 13.2. Vector Single-Width Averaging Add and Subtract
4747//===----------------------------------------------------------------------===//
4748defm : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
4749defm : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
4750defm : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
4751defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;
4752
4753//===----------------------------------------------------------------------===//
4754// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
4755//===----------------------------------------------------------------------===//
4756defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;
4757
4758//===----------------------------------------------------------------------===//
4759// 13.4. Vector Single-Width Scaling Shift Instructions
4760//===----------------------------------------------------------------------===//
4761defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
4762                            uimm5>;
4763defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
4764                            uimm5>;
4765
4766//===----------------------------------------------------------------------===//
4767// 13.5. Vector Narrowing Fixed-Point Clip Instructions
4768//===----------------------------------------------------------------------===//
4769defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
4770defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;
4771
4772} // Predicates = [HasVInstructions]
4773
let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
// vfrsub (reversed subtract) only has a scalar (.vf) form.
defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
// _WV/_WX ("_w" intrinsics): the first operand is already 2*SEW wide.
defm : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>;
// vfrdiv (reversed divide) only has a scalar (.vf) form.
defm : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
// The _AAXA multiclasses cover both the .vv and .vf forms of each FMA.
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;
4813
4814//===----------------------------------------------------------------------===//
4815// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
4816//===----------------------------------------------------------------------===//
4817defm : VPatTernaryW_VV_VX<"int_riscv_vfwmacc", "PseudoVFWMACC", AllWidenableFloatVectors>;
4818defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmacc", "PseudoVFWNMACC", AllWidenableFloatVectors>;
4819defm : VPatTernaryW_VV_VX<"int_riscv_vfwmsac", "PseudoVFWMSAC", AllWidenableFloatVectors>;
4820defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>;
4821
4822//===----------------------------------------------------------------------===//
4823// 14.8. Vector Floating-Point Square-Root Instruction
4824//===----------------------------------------------------------------------===//
4825defm : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;
4826
4827//===----------------------------------------------------------------------===//
4828// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
4829//===----------------------------------------------------------------------===//
4830defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;
4831
4832//===----------------------------------------------------------------------===//
4833// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
4834//===----------------------------------------------------------------------===//
4835defm : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;
4836
4837//===----------------------------------------------------------------------===//
4838// 14.11. Vector Floating-Point Min/Max Instructions
4839//===----------------------------------------------------------------------===//
4840defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
4841defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;
4842
4843//===----------------------------------------------------------------------===//
4844// 14.12. Vector Floating-Point Sign-Injection Instructions
4845//===----------------------------------------------------------------------===//
4846defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
4847defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
4848defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;
4849
4850//===----------------------------------------------------------------------===//
4851// 14.13. Vector Floating-Point Compare Instructions
4852//===----------------------------------------------------------------------===//
4853defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
4854defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
4855defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
4856defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
4857defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
4858defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
4859defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
4860defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;
4861
4862//===----------------------------------------------------------------------===//
4863// 14.14. Vector Floating-Point Classify Instruction
4864//===----------------------------------------------------------------------===//
4865defm : VPatClassifyVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
4866
4867//===----------------------------------------------------------------------===//
4868// 14.15. Vector Floating-Point Merge Instruction
4869//===----------------------------------------------------------------------===//
4870// We can use vmerge.vvm to support vector-vector vfmerge.
4871// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
4872// int_riscv_vmerge. Support both for compatibility.
4873defm : VPatBinaryV_VM<"int_riscv_vmerge", "PseudoVMERGE",
4874                      /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
4875defm : VPatBinaryV_VM<"int_riscv_vfmerge", "PseudoVMERGE",
4876                      /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
4877defm : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE",
4878                      /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
4879
4880foreach fvti = AllFloatVectors in {
4881  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
4882  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2),
4883                                            (fvti.Scalar (fpimm0)),
4884                                            (fvti.Mask V0), VLOpFrag)),
4885            (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
4886}
4887
4888//===----------------------------------------------------------------------===//
4889// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
4890//===----------------------------------------------------------------------===//
4891defm : VPatConversionVI_VF<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
4892defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
4893defm : VPatConversionVI_VF<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
4894defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
4895defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">;
4896defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">;
4897
4898//===----------------------------------------------------------------------===//
4899// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
4900//===----------------------------------------------------------------------===//
4901defm : VPatConversionWI_VF<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
4902defm : VPatConversionWI_VF<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
4903defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
4904defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
4905defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
4906defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
4907defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;
4908
4909//===----------------------------------------------------------------------===//
4910// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
4911//===----------------------------------------------------------------------===//
4912defm : VPatConversionVI_WF<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
4913defm : VPatConversionVI_WF<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
4914defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
4915defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
4916defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
4917defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
4918defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
4919defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
4920} // Predicates = [HasVInstructionsAnyF]
4921
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 15.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// _VS reductions reduce a vector source into a scalar (element 0) result.
defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;

//===----------------------------------------------------------------------===//
// 15.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
} // Predicates = [HasVInstructions]
4941
let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
// Same multiclasses as the integer reductions above; IsFloat=1 makes them
// instantiate over the floating-point vector type lists instead.
defm : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredusum", "PseudoVFREDUSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;

//===----------------------------------------------------------------------===//
// 15.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionW_VS<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", /*IsFloat=*/1>;
defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;

} // Predicates = [HasVInstructionsAnyF]
4958
4959//===----------------------------------------------------------------------===//
4960// 16. Vector Mask Instructions
4961//===----------------------------------------------------------------------===//
4962
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 16.1 Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//
// Mask-register logical ops operate on whole mask registers (mask-to-mask),
// so a single VPatBinaryM_MM instantiation per intrinsic suffices.
defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;

// Spec pseudoinstructions vmclr.m / vmset.m: modeled here as nullary pseudos
// (they take no vector source operands).
defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;

//===----------------------------------------------------------------------===//
// 16.2. Vector count population in mask vcpop.m
//===----------------------------------------------------------------------===//
// Mask source, scalar (GPR) result.
defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;

//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;

//===----------------------------------------------------------------------===//
// 16.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
// vmsbf/vmsif/vmsof are mask-to-mask unary ops (VPatUnaryM_M).
defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;

//===----------------------------------------------------------------------===//
// 16.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;

//===----------------------------------------------------------------------===//
// 16.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;

//===----------------------------------------------------------------------===//
// 16.8. Vector Iota Instruction
//===----------------------------------------------------------------------===//
// Mask source, vector result (VPatUnaryV_M).
defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;

//===----------------------------------------------------------------------===//
// 16.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
// vid.v has no source operands, only a vector result.
defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;

} // Predicates = [HasVInstructions]
5016
5017//===----------------------------------------------------------------------===//
5018// 17. Vector Permutation Instructions
5019//===----------------------------------------------------------------------===//
5020
5021//===----------------------------------------------------------------------===//
5022// 17.1. Integer Scalar Move Instructions
5023//===----------------------------------------------------------------------===//
5024
let Predicates = [HasVInstructions] in {
// Lower the RISCVISD::VMV_X_S node (vmv.x.s) to the LMUL-specific pseudo.
// Only the source register and Log2SEW are passed; there is no VL operand.
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
}
} // Predicates = [HasVInstructions]
5032
5033//===----------------------------------------------------------------------===//
5034// 17.2. Floating-Point Scalar Move Instructions
5035//===----------------------------------------------------------------------===//
5036
let Predicates = [HasVInstructionsAnyF] in {
foreach fvti = AllFloatVectors in {
  // vfmv.f.s: vector -> FP scalar. Pseudo name encodes the scalar register
  // class suffix (ScalarSuffix) and LMUL. Takes only the source and Log2SEW;
  // no VL operand.
  defvar instr = !cast<Instruction>("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
                                    fvti.LMul.MX);
  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
                         (instr $rs2, fvti.Log2SEW)>;

  // vfmv.s.f: FP scalar -> vector. Carries a merge operand ($rs1) and a VL
  // operand captured by VLOpFrag, unlike the scalar-extract above.
  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                                fvti.LMul.MX)
             (fvti.Vector $rs1),
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             GPR:$vl, fvti.Log2SEW)>;
}
} // Predicates = [HasVInstructionsAnyF]
5053
5054//===----------------------------------------------------------------------===//
5055// 17.3. Vector Slide Instructions
5056//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  // vslideup/vslidedown are ternary (merge, source, offset) and accept either
  // a GPR or a 5-bit immediate offset (uimm5); vslide1up/vslide1down take the
  // inserted element from a GPR only.
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
} // Predicates = [HasVInstructions]
5063
let Predicates = [HasVInstructionsAnyF] in {
  // FP vectors reuse the integer vslideup/vslidedown intrinsics; the element
  // slide variants are the dedicated vfslide1up/vfslide1down intrinsics.
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]
5070
5071//===----------------------------------------------------------------------===//
5072// 17.4. Vector Register Gather Instructions
5073//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  // vrgather accepts vector, GPR, or 5-bit immediate indices (VV/VX/VI forms).
  // vrgatherei16.vv always uses 16-bit index elements regardless of data SEW,
  // hence the explicit eew=16 argument.
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllIntegerVectors>;
} // Predicates = [HasVInstructions]
5080
let Predicates = [HasVInstructionsAnyF] in {
  // Same gather intrinsics instantiated over the floating-point vector types.
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]
5087
5088//===----------------------------------------------------------------------===//
5089// 17.5. Vector Compress Instruction
5090//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  // vcompress takes an arbitrary (not VL/tail-policy) mask operand, so it
  // uses the dedicated AnyMask unary multiclass.
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
} // Predicates = [HasVInstructions]
5094
let Predicates = [HasVInstructionsAnyF] in {
  // Same vcompress intrinsic instantiated over the floating-point vector types.
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]
5098
5099// Include the non-intrinsic ISel patterns
5100include "RISCVInstrInfoVSDPatterns.td"
5101include "RISCVInstrInfoVVLPatterns.td"
5102