xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (revision 43e29d03f416d7dda52112a29600a7c82ee1a91e)
1//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file contains the required infrastructure to support code generation
10/// for the standard 'V' (Vector) extension, version 1.0.
11///
12/// This file is included from RISCVInstrInfoV.td
13///
14//===----------------------------------------------------------------------===//
15
// SelectionDAG node for RISCVISD::VMV_X_S (vmv.x.s): produces an integer
// scalar from an integer vector operand.
def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
// SelectionDAG node for RISCVISD::READ_VLENB: reads the VLENB CSR into an
// XLen-sized scalar; takes no operands.
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;

// Operand that is allowed to be a register or a 5 bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
def AVL : RegisterOperand<GPRNoX0> {
  let OperandNamespace = "RISCVOp";
  let OperandType = "OPERAND_AVL";
}

// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
// Complex pattern that selects the AVL operand (handled by selectVLOp on the
// C++ side).
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;

// Transforms an immediate into the same-typed target constant minus one.
def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Encodings for the vector policy operand: from the constants below, bit 0
// is the tail policy (agnostic when set) and bit 1 the mask policy.
defvar TAIL_UNDISTURBED_MASK_UNDISTURBED = 0;
defvar TAIL_AGNOSTIC = 1;
// Both tail-agnostic and mask-agnostic bits set.
defvar TA_MA = 3;
45
46//===----------------------------------------------------------------------===//
47// Utilities.
48//===----------------------------------------------------------------------===//
49
// Derives the name of the underlying real vector instruction from a pseudo's
// name by dropping the "Pseudo" prefix, the LMUL (_M*/_MF*) and mask-size
// (_B*) suffixes, and the _MASK/_TIED/_TU markers, and folding the FP width
// suffixes (F16/F32/F64) down to a plain "F".
// Note: !subst calls nest innermost-first, so longer suffixes such as "_B16"
// are removed before the shorter "_B1" could match them as a prefix.
class PseudoToVInst<string PseudoInst> {
  string VInst = !subst("_M8", "",
                 !subst("_M4", "",
                 !subst("_M2", "",
                 !subst("_M1", "",
                 !subst("_MF2", "",
                 !subst("_MF4", "",
                 !subst("_MF8", "",
                 !subst("_B1", "",
                 !subst("_B2", "",
                 !subst("_B4", "",
                 !subst("_B8", "",
                 !subst("_B16", "",
                 !subst("_B32", "",
                 !subst("_B64", "",
                 !subst("_MASK", "",
                 !subst("_TIED", "",
                 !subst("_TU", "",
                 !subst("F16", "F",
                 !subst("F32", "F",
                 !subst("F64", "F",
                 !subst("Pseudo", "", PseudoInst)))))))))))))))))))));
}
73
// This class describes information associated to the LMUL.
class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;   // Register class at this LMUL.
  VReg wvrclass = wregclass; // Register class at 2*LMUL (widened result).
  // Register classes at LMUL/8, LMUL/4 and LMUL/2. Where no such class
  // exists, plain VR is used as a placeholder (see /*NoVReg*/ below).
  VReg f8vrclass = f8regclass;
  VReg f4vrclass = f4regclass;
  VReg f2vrclass = f2regclass;
  string MX = mx;       // Textual suffix used in pseudo names ("M1", "MF8", ...).
  int octuple = oct;    // LMUL scaled by 8 so fractions stay integral (MF8 = 1).
}

// Associate LMUL with tablegen records of register classes.
def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;
96
// Used to iterate over all possible LMULs.
defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
// For floating point which don't need MF8.
defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for widening and narrowing instructions as it doesn't contain M8.
defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
// For floating point which don't need MF8.
defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];

// Use for zext/sext.vf2
defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf4
defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf8
defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];

// The LMULs that are legal for a given element width: the smallest fractional
// LMUL retained is eew/64, so wider elements drop the smaller fractions.
class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}
122
// Bundles a scalar FP register class with its name suffix and the LMUL lists
// applicable to vector operations on that element width.
class FPR_Info<RegisterClass regclass, string fx, list<LMULInfo> mxlist,
               list<LMULInfo> mxlistfw> {
  RegisterClass fprclass = regclass; // Scalar FP register class (FPR16/32/64).
  string FX = fx;                    // Suffix used in pseudo names ("F16", ...).
  list<LMULInfo> MxList = mxlist;    // Legal LMULs for this element width.
  list<LMULInfo> MxListFW = mxlistfw; // Legal LMULs for widening ops (no M8).
}

def SCALAR_F16 : FPR_Info<FPR16, "F16", MxSet<16>.m, [V_MF4, V_MF2, V_M1, V_M2, V_M4]>;
def SCALAR_F32 : FPR_Info<FPR32, "F32", MxSet<32>.m, [V_MF2, V_M1, V_M2, V_M4]>;
def SCALAR_F64 : FPR_Info<FPR64, "F64", MxSet<64>.m, []>;

defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];

// Used for widening instructions. It excludes F64.
defvar FPListW = [SCALAR_F16, SCALAR_F32];
139
// Legal NF (fields per segment) values for segment loads/stores at LMUL m.
// NF * LMUL may not exceed 8 vector registers, so M8 supports no segments,
// M4 only NF=2, M2 up to NF=4, and M1 and fractions the full 2..8 range.
class NFSet<LMULInfo m> {
  list<int> L = !cond(!eq(m.value, V_M8.value): [],
                      !eq(m.value, V_M4.value): [2],
                      !eq(m.value, V_M2.value): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}

// Compile-time base-2 logarithm; num must be a power of two (recurses on
// num >> 1 until it reaches 1).
class log2<int num> {
  int val = !if(!eq(num, 1), 0, !add(1, log2<!srl(num, 1)>.val));
}
150
// Maps an octuple-of-LMUL value (LMUL * 8, see LMULInfo.octuple) back to its
// "Mx" suffix string; yields "NoDef" for unsupported values.
class octuple_to_str<int octuple> {
  string ret = !cond(!eq(octuple, 1)  : "MF8",
                     !eq(octuple, 2)  : "MF4",
                     !eq(octuple, 4)  : "MF2",
                     !eq(octuple, 8)  : "M1",
                     !eq(octuple, 16) : "M2",
                     !eq(octuple, 32) : "M4",
                     !eq(octuple, 64) : "M8",
                     true             : "NoDef");
}
161
// Pattern fragment that matches an AVL value selected via the VLOp complex
// pattern above.
def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;

// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
// We can't use X0 register because the AVL operands use GPRNoX0.
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;

// List of EEW.
defvar EEWList = [8, 16, 32, 64];

// Register class for an NF-field segment at LMUL m: "VRN<nf><Mx>".
// Fractional LMULs map to the M1 class, since a fraction still occupies a
// whole vector register.
class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}
178
179//===----------------------------------------------------------------------===//
180// Vector register and vector group type information.
181//===----------------------------------------------------------------------===//
182
// Bundles everything codegen needs to know about one vector value type: the
// vector and mask MVTs, SEW, register class, LMUL, and the scalar (element)
// type together with its register class.
class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
{
  ValueType Vector = Vec;
  ValueType Mask = Mas;
  int SEW = Sew;
  int Log2SEW = log2<Sew>.val;
  VReg RegClass = Reg;
  LMULInfo LMul = M;
  ValueType Scalar = Scal;
  RegisterClass ScalarRegClass = ScalarReg;
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  // Suffix used in pseudo names for the scalar operand type: "X" for
  // XLen/integer scalars, "F16"/"F32"/"F64" for FP scalars.
  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "F16",
                              !eq(Scal, f32) : "F32",
                              !eq(Scal, f64) : "F64");
}

// A vector type occupying a register group (LMUL > 1). VectorM1 is the
// corresponding LMUL=1 vector type with the same element type.
class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
{
  ValueType VectorM1 = VecM1;
}
211
// All supported vector value types, organized into nested defsets so that
// patterns can iterate over exactly the subset they apply to (integer vs.
// float; fractional LMUL, LMUL=1, or register groups).
defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
        def VI8MF8: VTypeInfo<vint8mf8_t,  vbool64_t,  8, VR, V_MF8>;
        def VI8MF4: VTypeInfo<vint8mf4_t,  vbool32_t,  8, VR, V_MF4>;
        def VI8MF2: VTypeInfo<vint8mf2_t,  vbool16_t,  8, VR, V_MF2>;
        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
      }
      def VI8M1: VTypeInfo<vint8m1_t,   vbool8_t,   8, VR, V_M1>;
      def VI16M1: VTypeInfo<vint16m1_t,  vbool16_t, 16, VR, V_M1>;
      def VI32M1: VTypeInfo<vint32m1_t,  vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t,  vbool64_t, 64, VR, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      defset list<VTypeInfo> FractionalGroupFloatVectors = {
        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
        def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      }
      def VF16M1:  VTypeInfo<vfloat16m1_t,  vbool16_t, 16, VR, V_M1,  f16, FPR16>;
      def VF32M1:  VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1,  f32, FPR32>;
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                 VRM2, V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                 VRM4, V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                 VRM8, V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                 VRM2, V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
                                 VRM4, V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
                                 VRM8, V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                 VRM2, V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                 VRM4, V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
                                 VRM8, V_M8, f64, FPR64>;
    }
  }
}
283
// This functor is used to obtain the int vector type that has the same SEW and
// multiplier as the input parameter type.
class GetIntVTypeInfo<VTypeInfo vti>
{
  // Equivalent integer vector type. Eg.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}

// Type information for a mask (vbool) type, analogous to VTypeInfo above.
class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // we assume SEW=1 and set corresponding LMUL. vsetvli insertion will
  // look for SEW=1 to optimize based on surrounding instructions.
  int SEW = 1;
  int Log2SEW = 0;
  LMULInfo LMul = M;
  string BX = Bx; // Appendix of mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}
308
// All mask types, from the finest (vbool64_t, one bit per MF8 element) to
// the coarsest (vbool1_t, one bit per M8 element).
defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}

// Pairs a vector type with its widened (double SEW, double LMUL) counterpart.
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

// Pairs a vector type with a fractional (smaller SEW, smaller LMUL)
// counterpart; used for sign/zero extension patterns.
class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}
331
// Integer types paired with their double-SEW widened types (for widening
// integer instructions).
defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}

// Float types paired with their double-SEW widened types (for widening FP
// instructions).
defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}
364
// Integer types paired with the type at half the SEW (for zext/sext.vf2).
defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}

// Integer types paired with the type at a quarter of the SEW (zext/sext.vf4).
defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}

// Integer types paired with the type at an eighth of the SEW (zext/sext.vf8).
defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}

// Integer types paired with the float type at double the SEW (for widening
// int-to-float conversions).
defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}
421
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVBaseInfo.h.
// Simple wrapper for an 8-bit constant.
class CONST8b<bits<8> val> {
  bits<8> V = val;
}
// Sentinel meaning "no such operand index".
// NOTE(review): presumably mirrored on the C++ side — verify against
// RISCVBaseInfo.h.
def InvalidIndex : CONST8b<0x80>;
// Mixin for every V pseudo: records the pseudo itself (the table key) and
// the real instruction it expands to, derived from the pseudo's name.
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// The actual table.
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
  let PrimaryKeyEarlyOut = true;
}

// Reverse mapping: look up a pseudo from its base instruction and LMUL.
def RISCVVInversePseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr", "VLMul" ];
  let PrimaryKey = [ "BaseInstr", "VLMul" ];
  let PrimaryKeyName = "getBaseInfo";
  let PrimaryKeyEarlyOut = true;
}
453
// Table mapping each RISCVVIntrinsic record to its scalar and VL operand
// positions, keyed by intrinsic ID.
def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}

// Relates a masked pseudo (NAME, which contains "_MASK") to its unmasked
// variants by name substitution. When HasTU is false there is no separate
// tail-undisturbed variant and UnmaskedTUPseudo falls back to the masked
// pseudo itself. MaskOpIdx is the operand index of the mask register.
class RISCVMaskedPseudo<bits<4> MaskIdx, bit HasTU = true> {
  Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
  Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
  Pseudo UnmaskedTUPseudo = !if(HasTU, !cast<Pseudo>(!subst("_MASK", "", NAME # "_TU")), MaskedPseudo);
  bits<4> MaskOpIdx = MaskIdx;
}

def RISCVMaskedPseudosTable : GenericTable {
  let FilterClass = "RISCVMaskedPseudo";
  let CppTypeName = "RISCVMaskedPseudoInfo";
  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "UnmaskedTUPseudo", "MaskOpIdx"];
  let PrimaryKey = ["MaskedPseudo"];
  let PrimaryKeyName = "getMaskedPseudoInfo";
}
476
// Key record for vector-load pseudos: masked/TU/strided/FF flags plus
// log2(EEW) and LMUL identify the pseudo in RISCVVLETable below.
class RISCVVLE<bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Strided = Str;
  bits<1> FF = F;        // Fault-first ("FF") load variant.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Secondary index into RISCVMaskedPseudosTable: find the masked pseudo from
// its unmasked (tail-agnostic) counterpart.
def lookupMaskedIntrinsicByUnmaskedTA : SearchIndex {
  let Table = RISCVMaskedPseudosTable;
  let Key = ["UnmaskedPseudo"];
}

def RISCVVLETable : GenericTable {
  let FilterClass = "RISCVVLE";
  let CppTypeName = "VLEPseudo";
  let Fields = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLEPseudo";
}
499
// Key record for vector-store pseudos (no TU/FF flags: stores have neither).
class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSETable : GenericTable {
  let FilterClass = "RISCVVSE";
  let CppTypeName = "VSEPseudo";
  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSEPseudo";
}

// Shared key record for indexed loads (VLX) and stores (VSX). Indexed ops
// carry both the data LMUL and the index-operand LMUL.
class RISCVVLX_VSX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Ordered = O;   // Ordered (vluxei/vloxei distinction) variant.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

class RISCVVLX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, TU, O, S, L, IL>;
// Stores have no tail-undisturbed variant, so TU is fixed to 0.
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, /*TU*/0, O, S, L, IL>;

// Common table layout for the VLX and VSX tables below.
class RISCVVLX_VSXTable : GenericTable {
  let CppTypeName = "VLX_VSXPseudo";
  let Fields = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}

def RISCVVLXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVLX";
  let PrimaryKeyName = "getVLXPseudo";
}

def RISCVVSXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVSX";
  let PrimaryKeyName = "getVSXPseudo";
}
546
// Key record for segment-load pseudos; NF is the number of fields per
// segment (see NFSet above).
class RISCVVLSEG<bits<4> N, bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLSEGTable : GenericTable {
  let FilterClass = "RISCVVLSEG";
  let CppTypeName = "VLSEGPseudo";
  let Fields = ["NF", "Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLSEGPseudo";
}

// Key record for indexed segment-load pseudos (adds the index LMUL).
class RISCVVLXSEG<bits<4> N, bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> IsTU = TU;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLXSEGTable : GenericTable {
  let FilterClass = "RISCVVLXSEG";
  let CppTypeName = "VLXSEGPseudo";
  let Fields = ["NF", "Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVLXSEGPseudo";
}
584
// Key record for segment-store pseudos (no TU/FF: stores have neither).
class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSSEGTable : GenericTable {
  let FilterClass = "RISCVVSSEG";
  let CppTypeName = "VSSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSSEGPseudo";
}

// Key record for indexed segment-store pseudos.
class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSXSEGTable : GenericTable {
  let FilterClass = "RISCVVSXSEG";
  let CppTypeName = "VSXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVSXSEGPseudo";
}
619
620//===----------------------------------------------------------------------===//
621// Helpers to define the different pseudo instructions.
622//===----------------------------------------------------------------------===//
623
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
// Maps a register class to its "NoV0" variant (which excludes v0); classes
// without such a variant are returned unchanged.
class GetVRegNoV0<VReg VRegClass> {
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
                 true : VRegClass);
}

// Join strings in list using separator and ignoring empty elements.
// Implemented as a left fold over the list, pairwise concatenating with the
// separator only when both sides are non-empty.
class Join<list<string> strings, string separator> {
  string ret = !foldl(!head(strings), !tail(strings), a, b,
                      !cond(
                        !and(!empty(a), !empty(b)) : "",
                        !empty(a) : b,
                        !empty(b) : a,
                        1 : a#separator#b));
}
656
// Generic V pseudo: records its base instruction and LMUL for the
// RISCVVPseudosTable lookups.
class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
}

// Unit-stride load, unmasked, no merge operand (TU=0 in the RISCVVLE key).
// Operands: base address, AVL, SEW.
class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}
675
// Unit-stride load, unmasked, tail-undisturbed: carries a $dest merge
// operand tied to the result ($rd = $dest) so tail elements are preserved.
class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

// Unit-stride load, masked: takes a merge operand, a mask (v0), and a policy
// operand. The result class excludes v0 so it cannot clobber the mask.
class VPseudoUSLoadMask<VReg RetClass, int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
708
// Fault-first unit-stride load, unmasked. FF variants produce an extra GPR
// output ($vl) holding the new vector length after a trapped element.
class VPseudoUSLoadFFNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins GPRMem:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}

// Fault-first unit-stride load, unmasked, tail-undisturbed ($rd tied to the
// $dest merge operand).
class VPseudoUSLoadFFNoMaskTU<VReg RetClass, int EEW> :
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

// Fault-first unit-stride load, masked, with merge and policy operands.
class VPseudoUSLoadFFMask<VReg RetClass, int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1,
                   VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
754
// Strided load, unmasked. $rs2 holds the byte stride.
class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPRMem:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}

// Strided load, unmasked, tail-undisturbed ($rd tied to the $dest merge
// operand).
class VPseudoSLoadNoMaskTU<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}

// Strided load, masked, with merge and policy operands; result class
// excludes v0 so the mask register is never clobbered.
class VPseudoSLoadMask<VReg RetClass, int EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1, GPR:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
800
// Indexed load, unmasked. EEW/LMUL describe the index operand; callers set
// EarlyClobber when the destination must not overlap the index register
// group, which is expressed through the @earlyclobber constraint.
class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                         bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
              ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, /*TU*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  // A bit is usable directly as an !if condition; !eq against 1 is redundant.
  let Constraints = !if(EarlyClobber, "@earlyclobber $rd", "");
}
816
// Indexed load, unmasked, tail-undisturbed: adds a $dest operand tied to
// $rd. With EarlyClobber set, both the earlyclobber and tie constraints are
// combined in a single Constraints string.
class VPseudoILoadNoMaskTU<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                           bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
              ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  // A bit is usable directly as an !if condition; !eq against 1 is redundant.
  let Constraints = !if(EarlyClobber, "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
}
833
// Indexed load, masked: $merge tied to $rd, mask in $vm, trailing policy
// immediate. EarlyClobber additionally forbids $rd overlapping the index
// register group.
class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                       bit Ordered, bit EarlyClobber>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1, IdxClass:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/1, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // A bit is usable directly as an !if condition; !eq against 1 is redundant.
  let Constraints = !if(EarlyClobber, "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
852
// Unit-stride store, unmasked: value in $rd, base address in $rs1.
// DummyMask defaults to 1; mask-register stores (vsm) pass 0.
class VPseudoUSStoreNoMask<VReg StClass, int EEW, bit DummyMask = 1>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}
865
// Unit-stride store, masked: mask supplied in $vm. Stores produce no
// result, so there is no merge operand or policy immediate.
class VPseudoUSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
877
// Strided store, unmasked: byte stride in GPR $rs2.
class VPseudoSStoreNoMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
890
// Strided store, masked: byte stride in GPR $rs2, mask in $vm.
class VPseudoSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
902
// Unary instruction that is never masked so HasDummyMask=0.
// Single source in $rs1 plus the usual VL/SEW operands.
class VPseudoUnaryNoDummyMask<VReg RetClass,
                              DAGOperand Op2Class> :
        Pseudo<(outs RetClass:$rd),
               (ins Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
915
// Tail-undisturbed form of VPseudoUnaryNoDummyMask: adds a $dest operand
// tied to $rd.
class VPseudoUnaryNoDummyMaskTU<VReg RetClass,
                                DAGOperand Op2Class> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$dest, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}
929
// Nullary (no vector source) operation, unmasked: only VL and SEW inputs.
class VPseudoNullaryNoMask<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins AVL:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
941
// Nullary operation, unmasked, tail-undisturbed: $merge tied to $rd.
class VPseudoNullaryNoMaskTU<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins RegClass:$merge, AVL:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
}
955
// Nullary operation, masked: $merge tied to $rd, mask in $vm, trailing
// policy immediate. Destination excludes V0 since V0 carries the mask.
class VPseudoNullaryMask<VReg RegClass>:
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
              ixlenimm:$sew, ixlenimm:$policy), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  // Spacing and flag order normalized to match the sibling *Mask classes.
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
970
// Nullary for pseudo instructions. They are expanded in
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
       : Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
       RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
  // Just fill a corresponding real v-inst to pass tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
}
985
// Unary operation, unmasked; source in $rs2.
// RetClass could be GPR or VReg.
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
999
// Unary operation, unmasked, tail-undisturbed: $merge tied to $rd, joined
// with any caller-provided Constraint.
// RetClass could be GPR or VReg.
class VPseudoUnaryNoMaskTU<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
      Pseudo<(outs RetClass:$rd),
        (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
}
1014
// Unary operation, masked, without a policy operand (contrast with
// VPseudoUnaryMaskTA below).
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let UsesMaskPolicy = 1;
}
1029
// Unary operation, masked, with a trailing vector-policy immediate
// ($policy) so tail/mask agnosticism is selectable per instruction.
class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1045
// Like VPseudoUnaryMaskTA but expanded through a custom inserter
// (usesCustomInserter = 1). Note: unlike most classes in this file it does
// not derive from RISCVVPseudo.
class VPseudoUnaryMaskTA_NoExcept<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2, VMaskOp:$vm,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let usesCustomInserter = 1;
}
1061
// Like VPseudoUnaryMaskTA_NoExcept but additionally carries a rounding-mode
// immediate ($frm) before VL/SEW; also expanded via a custom inserter and
// not derived from RISCVVPseudo.
class VPseudoUnaryMaskTA_FRM<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, ixlenimm:$frm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let usesCustomInserter = 1;
}
1077
// mask unary operation without maskedoff
// Produces a scalar (GPR) result from a mask-register source, under mask.
class VPseudoMaskUnarySOutMask:
        Pseudo<(outs GPR:$rd),
               (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1089
// Mask can be V0~V31 (plain VR operand rather than VMaskOp), so the result
// is early-clobber in addition to being tied to $merge.
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2,
                  VR:$vm, AVL:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
}
1107
// Binary operation, unmasked: sources in $rs2 and $rs1. DummyMask can be
// overridden to 0 for instructions that are never masked.
class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint,
                          int DummyMask = 1> :
        Pseudo<(outs RetClass:$rd),
               (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = DummyMask;
}
1124
// Binary operation, unmasked, tail-undisturbed: $merge tied to $rd, joined
// with any caller-provided Constraint.
class VPseudoBinaryNoMaskTU<VReg RetClass,
                            VReg Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
}
1141
// Special version of VPseudoBinaryNoMask where we pretend the first source is
// tied to the destination.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryNoMask<VReg RetClass,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew,
                    ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs2"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasVecPolicyOp = 1;
  // The tie to $rs2 can be broken by rewriting to three-address form.
  let isConvertibleToThreeAddress = 1;
}
1162
// Indexed store, unmasked: value in $rd, base in $rs1, indices in $rs2.
// EEW/LMUL describe the index operand.
class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1176
// Indexed store, masked: same as VPseudoIStoreNoMask plus a $vm mask operand.
class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                        bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1189
// Binary operation, masked, without a policy operand; $merge tied to $rd.
class VPseudoBinaryMask<VReg RetClass,
                        RegisterClass Op1Class,
                        DAGOperand Op2Class,
                        string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
}
1207
// Binary operation, masked, with a trailing vector-policy immediate.
class VPseudoBinaryMaskPolicy<VReg RetClass,
                              RegisterClass Op1Class,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1227
// Like VPseudoBinaryMask, but output can be V0.
class VPseudoBinaryMOutMask<VReg RetClass,
                            RegisterClass Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
                (ins RetClass:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let UsesMaskPolicy = 1;
}
1247
// Special version of VPseudoBinaryMask where we pretend the first source is
// tied to the destination so we can workaround the earlyclobber constraint.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryMask<VReg RetClass,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0; // Merge is also rs2.
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1269
// Binary operation with an optional carry-in mask in V0 (e.g. vadc/vmadc
// style pseudos): CarryIn selects between the two operand lists. VLMul is
// set here from MInfo instead of by the caller's `let`.
class VPseudoBinaryCarryIn<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           LMULInfo MInfo,
                           bit CarryIn,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0;
  let VLMul = MInfo.value;
}
1291
// Carry-in binary operation with a $merge operand tied to $rd; otherwise
// mirrors VPseudoBinaryCarryIn (CarryIn selects the operand list, VLMul is
// taken from MInfo).
class VPseudoTiedBinaryCarryIn<VReg RetClass,
                               VReg Op1Class,
                               DAGOperand Op2Class,
                               LMULInfo MInfo,
                               bit CarryIn,
                               string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 0;
  let VLMul = MInfo.value;
}
1314
// Ternary operation, unmasked: accumulator-style $rs3 is tied to $rd.
class VPseudoTernaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
}
1333
// Ternary operation, unmasked, with a trailing vector-policy immediate;
// $rs3 is tied to $rd.
class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
                                     string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
}
1353
// Unit-stride segment load (NF fields), unmasked.
class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1366
// Unit-stride segment load, unmasked, tail-undisturbed: $dest tied to $rd.
class VPseudoUSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}
1381
// Unit-stride segment load, masked: $merge tied to $rd, mask in $vm,
// trailing policy immediate.
class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1398
// Fault-only-first segment load, unmasked: also defines GPR $vl as an
// output (the updated vl after a trapped element), so the AVL input is
// named $avl instead.
class VPseudoUSSegLoadFFNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins GPRMem:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1411
// Fault-only-first segment load, unmasked, tail-undisturbed: $dest tied to
// $rd; GPR $vl is an output (see VPseudoUSSegLoadFFNoMask).
class VPseudoUSSegLoadFFNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $dest";
}
1426
// Fault-only-first segment load, masked: $merge tied to $rd, mask in $vm,
// trailing policy immediate; GPR $vl is an output.
class VPseudoUSSegLoadFFMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1443
// Strided segment load, unmasked: byte stride in GPR $offset.
class VPseudoSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPRMem:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1456
// Strided segment load, unmasked, tail-undisturbed: $merge tied to $rd.
class VPseudoSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
  let Constraints = "$rd = $merge";
}
1471
// Strided segment load, masked: $merge tied to $rd, byte stride in
// $offset, mask in $vm, trailing policy immediate.
class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1489
// Indexed segment load, unmasked: indices in $offset; EEW/LMUL describe the
// index operand.
class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                            bits<4> NF, bit Ordered>:
      Pseudo<(outs RetClass:$rd),
             (ins GPRMem:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/0, /*TU*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1506
// Indexed segment load, unmasked, tail-undisturbed: early-clobber plus the
// $rd = $merge tie.
class VPseudoISegLoadNoMaskTU<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                              bits<4> NF, bit Ordered>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, GPRMem:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/0, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let HasMergeOp = 1;
}
1524
// Indexed segment load, masked: early-clobber plus the $rd = $merge tie,
// mask in $vm, trailing policy immediate.
class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bits<4> NF, bit Ordered>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/1, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1545
// Unit-stride segment store, unmasked: value in $rd, base in $rs1.
class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1558
// Unit-stride segment store, masked: mask in $vm.
class VPseudoUSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1571
// Strided segment store, unmasked: byte stride in GPR $offset.
class VPseudoSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             // Normalized "GPR: $offset" -> "GPR:$offset" to match the
             // operand spelling used everywhere else in this file.
             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1584
// Strided segment store, masked: byte stride in GPR $offset, mask in $vm.
class VPseudoSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             // Normalized "GPR: $offset" -> "GPR:$offset" to match the
             // operand spelling used everywhere else in this file.
             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1597
// Indexed segment store, unmasked: indices in $index; EEW/LMUL describe
// the index operand.
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                             bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             // Normalized "IdxClass: $index" -> "IdxClass:$index" to match
             // the operand spelling used everywhere else in this file.
             (ins ValClass:$rd, GPRMem:$rs1, IdxClass:$index,
                  AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
}
1612
// Indexed segment store, masked: indices in $index, mask in $vm.
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                           bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             // Normalized "IdxClass: $index" -> "IdxClass:$index" to match
             // the operand spelling used everywhere else in this file.
             (ins ValClass:$rd, GPRMem:$rs1, IdxClass:$index,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1626
// Defines unmasked, tail-undisturbed (_TU) and masked (_MASK) unit-stride
// load pseudos for every EEW and each LMUL legal for that EEW.
multiclass VPseudoUSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo :
          VPseudoUSLoadNoMask<vreg, eew>,
          VLESched<LInfo>;
        def "E" # eew # "_V_" # LInfo # "_TU":
          VPseudoUSLoadNoMaskTU<vreg, eew>,
          VLESched<LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" :
          VPseudoUSLoadMask<vreg, eew>,
          RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
          VLESched<LInfo>;
      }
    }
  }
}
1647
// Defines unmasked, tail-undisturbed (_TU) and masked (_MASK) fault-only-
// first load pseudos ("E<eew>FF_V_<LMUL>") for every legal EEW/LMUL pair.
multiclass VPseudoFFLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "FF_V_" # LInfo:
          VPseudoUSLoadFFNoMask<vreg, eew>,
          VLFSched<LInfo>;
        def "E" # eew # "FF_V_" # LInfo # "_TU":
          VPseudoUSLoadFFNoMaskTU<vreg, eew>,
          VLFSched<LInfo>;
        def "E" # eew # "FF_V_" # LInfo # "_MASK":
          VPseudoUSLoadFFMask<vreg, eew>,
          RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
          VLFSched<LInfo>;
      }
    }
  }
}
1668
// Defines mask-register load pseudos (one per mask type), reusing
// VPseudoUSLoadNoMask with EEW 1 and no dummy mask operand.
multiclass VPseudoLoadMask {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVLDM_MX = !cast<SchedWrite>("WriteVLDM_" # mx);
    defvar ReadVLDX_MX = !cast<SchedRead>("ReadVLDX_" # mx);
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*DummyMask*/0>,
        Sched<[WriteVLDM_MX, ReadVLDX_MX]>;
    }
  }
}
1680
// Strided load pseudos: unmasked, _TU and _MASK variants per EEW/LMUL,
// scheduled on the strided-load (VLSSched) resources.
multiclass VPseudoSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
                                        VLSSched<eew, LInfo>;
        def "E" # eew # "_V_" # LInfo # "_TU": VPseudoSLoadNoMaskTU<vreg, eew>,
                                        VLSSched<eew, LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" :
          VPseudoSLoadMask<vreg, eew>,
          // Strided pseudos carry an extra stride operand, so the mask is
          // operand 3 here (vs. 2 for unit-stride).
          RISCVMaskedPseudo</*MaskOpIdx*/ 3>,
          VLSSched<eew, LInfo>;
      }
    }
  }
}
1699
// Indexed load pseudos. Ordered selects the ordered vs. unordered form
// ("O"/"U" in the sched class name). The data register group uses LMUL;
// the index register group uses EMUL = EEW * LMUL / SEW (computed in
// eighths below) and is only emitted when EMUL is within the legal
// [1/8, 8] range (octuple value 1..64).
multiclass VPseudoILoad<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          // An overlap constraint is only needed when the index EEW differs
          // from the data SEW.
          defvar HasConstraint = !ne(sew, eew);
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
              VLXSched<eew, Order, LInfo>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_TU":
              VPseudoILoadNoMaskTU<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
              VLXSched<eew, Order, LInfo>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
              RISCVMaskedPseudo</*MaskOpIdx*/ 3>,
              VLXSched<eew, Order, LInfo>;
          }
        }
      }
    }
  }
}
1732
// Unit-stride store pseudos: unmasked and _MASK variants per EEW/LMUL.
// Stores have no _TU variant since they produce no vector result.
multiclass VPseudoUSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
                                        VSESched<LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
                                                  VSESched<LInfo>;
      }
    }
  }
}
1747
// Mask-register store pseudos, one per mask type; mirrors VPseudoLoadMask
// (EEW=1, no dummy mask operand, single VR register).
multiclass VPseudoStoreMask {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVSTM_MX = !cast<SchedWrite>("WriteVSTM_" # mx);
    defvar ReadVSTX_MX = !cast<SchedRead>("ReadVSTX_" # mx);
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>,
        Sched<[WriteVSTM_MX, ReadVSTX_MX]>;
    }
  }
}
1759
// Strided store pseudos: unmasked and _MASK variants per EEW/LMUL.
multiclass VPseudoSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
                                        VSSSched<eew, LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
                                                  VSSSched<eew, LInfo>;
      }
    }
  }
}
1774
// Indexed store pseudos. Same EMUL computation and legality filter as
// VPseudoILoad; no overlap constraint is needed since stores do not write
// a vector register group.
multiclass VPseudoIStore<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
              VSXSched<eew, Order, LInfo>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
              VSXSched<eew, Order, LInfo>;
          }
        }
      }
    }
  }
}
1802
// Mask-to-scalar pseudos with a GPR result (uses VMPopV sched resources,
// i.e. vcpop.m-style population count). One unmasked and one masked
// variant per mask type.
multiclass VPseudoVPOP_M {
  foreach mti = AllMasks in
  {
    defvar mx = mti.LMul.MX;
    defvar WriteVMPopV_MX = !cast<SchedWrite>("WriteVMPopV_" # mx);
    defvar ReadVMPopV_MX = !cast<SchedRead>("ReadVMPopV_" # mx);
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>,
                           Sched<[WriteVMPopV_MX, ReadVMPopV_MX, ReadVMPopV_MX]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask,
                                     Sched<[WriteVMPopV_MX, ReadVMPopV_MX, ReadVMPopV_MX]>;
    }
  }
}

// Structurally identical to VPseudoVPOP_M but scheduled on the VMFFSV
// resources (presumably the vfirst.m find-first-set path — confirm against
// the instantiation site).
multiclass VPseudoV1ST_M {
  foreach mti = AllMasks in
  {
    defvar mx = mti.LMul.MX;
    defvar WriteVMFFSV_MX = !cast<SchedWrite>("WriteVMFFSV_" # mx);
    defvar ReadVMFFSV_MX = !cast<SchedRead>("ReadVMFFSV_" # mx);
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>,
                           Sched<[WriteVMFFSV_MX, ReadVMFFSV_MX, ReadVMFFSV_MX]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask,
                                     Sched<[WriteVMFFSV_MX, ReadVMFFSV_MX, ReadVMFFSV_MX]>;
    }
  }
}
1832
// Mask-to-mask pseudos on the VMSFSV sched resources (set-first-style mask
// ops). The result may not overlap the source, hence @earlyclobber.
multiclass VPseudoVSFS_M {
  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in
  {
    defvar mx = mti.LMul.MX;
    defvar WriteVMSFSV_MX = !cast<SchedWrite>("WriteVMSFSV_" # mx);
    defvar ReadVMSFSV_MX = !cast<SchedRead>("ReadVMSFSV_" # mx);
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>,
                           Sched<[WriteVMSFSV_MX, ReadVMSFSV_MX, ReadVMask]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
                                     Sched<[WriteVMSFSV_MX, ReadVMSFSV_MX, ReadVMask]>;
    }
  }
}
1848
// Nullary (no vector source) pseudos on the VMIdxV sched resources, i.e.
// the vid.v index-generation form: unmasked, _TU and _MASK variants per
// LMUL.
multiclass VPseudoVID_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVMIdxV_MX = !cast<SchedWrite>("WriteVMIdxV_" # mx);
    defvar ReadVMIdxV_MX = !cast<SchedRead>("ReadVMIdxV_" # mx);

    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>,
                         Sched<[WriteVMIdxV_MX, ReadVMask]>;
      def "_V_" # m.MX # "_TU": VPseudoNullaryNoMaskTU<m.vrclass>,
                                Sched<[WriteVMIdxV_MX, ReadVMask]>;
      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>,
                                   // Nullary: no vector sources, so the mask
                                   // is operand 1.
                                   RISCVMaskedPseudo</*MaskOpIdx*/ 1>,
                                   Sched<[WriteVMIdxV_MX, ReadVMask]>;
    }
  }
}
1866
// Whole-mask-register nullary pseudos that expand to BaseInst "_MM"
// (e.g. the vmset/vmclr idioms built from mask-mask ALU instructions).
multiclass VPseudoNullaryPseudoM <string BaseInst> {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVMALUV_MX = !cast<SchedWrite>("WriteVMALUV_" # mx);
    defvar ReadVMALUV_MX = !cast<SchedRead>("ReadVMALUV_" # mx);

    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">,
        Sched<[WriteVMALUV_MX, ReadVMALUV_MX, ReadVMALUV_MX]>;
    }
  }
}
1879
// Mask-to-vector pseudos on the VMIotV sched resources (viota.m-style).
// Destination EEW differs from the EEW=1 mask source, so the destination
// must not overlap the source: @earlyclobber.
multiclass VPseudoVIOT_M {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVMIotV_MX = !cast<SchedWrite>("WriteVMIotV_" # mx);
    defvar ReadVMIotV_MX = !cast<SchedRead>("ReadVMIotV_" # mx);
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
                       Sched<[WriteVMIotV_MX, ReadVMIotV_MX, ReadVMask]>;
      def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU<m.vrclass, VR, constraint>,
                               Sched<[WriteVMIotV_MX, ReadVMIotV_MX, ReadVMask]>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, VR, constraint>,
                                 RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                 Sched<[WriteVMIotV_MX, ReadVMIotV_MX, ReadVMask]>;
    }
  }
}
1897
// Compress pseudos (VCompressV sched resources): one _VM variant per LMUL,
// taking an arbitrary mask register as the selection operand.
multiclass VPseudoVCPR_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVCompressV_MX = !cast<SchedWrite>("WriteVCompressV_" # mx);
    defvar ReadVCompressV_MX = !cast<SchedRead>("ReadVCompressV_" # mx);

    let VLMul = m.value in
      def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
                             Sched<[WriteVCompressV_MX, ReadVCompressV_MX, ReadVCompressV_MX]>;
  }
}
1909
// Core binary pseudo builder: for one LMUL, emit the unmasked, unmasked
// tail-undisturbed (_TU) and masked-with-policy (_MASK) records. Op2Class
// is a DAGOperand so callers can pass a vector class, GPR, FPR or an
// immediate operand.
multiclass VPseudoBinary<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
                         LMULInfo MInfo,
                         string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                             Constraint>;
    def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
                                                       Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
                                                           Constraint>,
                                   // Merge + two sources precede the mask.
                                   RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
  }
}
1925
// Binary pseudo builder for mask-producing operations. The masked variant
// uses the mask-out class, is forced tail-agnostic, and has no _TU form
// (HasTU = false).
multiclass VPseudoBinaryM<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          LMULInfo MInfo,
                          string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                             Constraint>;
    let ForceTailAgnostic = true in
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
                                                         Op2Class, Constraint>,
                                   RISCVMaskedPseudo</*MaskOpIdx*/ 3, /*HasTU*/ false>;
  }
}
1940
// Like VPseudoBinary, but the second operand uses a separate EMUL (emul)
// from the data LMUL (lmul); both appear in the record name. Used when the
// index operand's register group size differs from the data's (e.g.
// gathers with a different index EEW).
multiclass VPseudoBinaryEmul<VReg RetClass,
                             VReg Op1Class,
                             DAGOperand Op2Class,
                             LMULInfo lmul,
                             LMULInfo emul,
                             string Constraint = ""> {
  let VLMul = lmul.value in {
    def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                                            Constraint>;
    def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
                                                                     Constraint>;
    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
                                                                          Constraint>,
                                                  RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
  }
}
1957
// Binary pseudo builder where the destination is tied to the first source
// (_TIED records); only one explicit DAGOperand source remains.
multiclass VPseudoTiedBinary<VReg RetClass,
                             DAGOperand Op2Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
                                                          Constraint>;
    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
                                                         Constraint>;
  }
}
1969
// Vector-vector binary pseudos at a single LMUL.
multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = ""> {
  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}

// Identical in body to VPseudoBinaryV_VV; kept as a separate entry point
// because callers instantiate it with LMULs drawn from MxListF (the
// floating-point LMUL list) rather than MxList.
multiclass VPseudoBinaryFV_VV<LMULInfo m, string Constraint = ""> {
  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}
1978
// Gather pseudos with a fixed index EEW (e.g. vrgatherei16): for each data
// LMUL and SEW, the index operand's EMUL = LMUL * eew / sew, skipped when
// outside the legal [1/8, 8] range (octuple value 1..64).
multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVGatherV_MX = !cast<SchedWrite>("WriteVGatherV_" # mx);
    defvar ReadVGatherV_MX = !cast<SchedRead>("ReadVGatherV_" # mx);

    foreach sew = EEWList in {
      defvar octuple_lmul = m.octuple;
      // emul = lmul * eew / sew
      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), log2<sew>.val);
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>,
                   Sched<[WriteVGatherV_MX, ReadVGatherV_MX, ReadVGatherV_MX]>;
      }
    }
  }
}
1998
// Vector-scalar (GPR) binary pseudos at a single LMUL.
multiclass VPseudoBinaryV_VX<LMULInfo m, string Constraint = ""> {
  defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

// vslide1up/vslide1down-style vector-scalar pseudos, iterating all LMULs
// internally and attaching the VISlide1X sched resources.
multiclass VPseudoVSLD1_VX<string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVISlide1X_MX = !cast<SchedWrite>("WriteVISlide1X_" # mx);
    defvar ReadVISlideV_MX = !cast<SchedRead>("ReadVISlideV_" # mx);
    defvar ReadVISlideX_MX = !cast<SchedRead>("ReadVISlideX_" # mx);

    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
                 Sched<[WriteVISlide1X_MX, ReadVISlideV_MX, ReadVISlideX_MX, ReadVMask]>;
  }
}
2014
// Vector-scalar (FPR) binary pseudos; f.FX (F/D/H) is part of the record
// name to distinguish the scalar register class.
multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, string Constraint = ""> {
  defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
                                   f.fprclass, m, Constraint>;
}

// vfslide1up/vfslide1down-style FP vector-scalar pseudos; iterates the
// FP types and each type's legal LMULs (f.MxList), attaching VFSlide1F
// sched resources.
multiclass VPseudoVSLD1_VF<string Constraint = ""> {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFSlide1F_MX = !cast<SchedWrite>("WriteVFSlide1F_" # mx);
      defvar ReadVFSlideV_MX = !cast<SchedRead>("ReadVFSlideV_" # mx);
      defvar ReadVFSlideF_MX = !cast<SchedRead>("ReadVFSlideF_" # mx);

      defm "_V" # f.FX :
        VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
        Sched<[WriteVFSlide1F_MX, ReadVFSlideV_MX, ReadVFSlideF_MX, ReadVMask]>;
    }
  }
}
2034
// Vector-immediate binary pseudos; ImmType defaults to simm5 but callers
// may pass e.g. uimm5 for shifts.
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}

// Mask-mask logical pseudos (vmand.mm etc.). Whole-register VR operands
// with no mask operand (DummyMask = 0); one record per LMUL.
multiclass VPseudoVALU_MM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVMALUV_MX = !cast<SchedWrite>("WriteVMALUV_" # mx);
    defvar ReadVMALUV_MX = !cast<SchedRead>("ReadVMALUV_" # mx);

    let VLMul = m.value in {
      def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "", /*DummyMask*/0>,
                          Sched<[WriteVMALUV_MX, ReadVMALUV_MX, ReadVMALUV_MX]>;
    }
  }
}
2051
// We use earlyclobber here due to the vector-overlap rules from the spec:
// * If the destination EEW is smaller than the source EEW, the overlap is
//   legal only in the lowest-numbered part of the source register group.
//   Otherwise, it is illegal.
// * If the destination EEW is greater than the source EEW and the source
//   EMUL is at least 1, the overlap is legal only in the highest-numbered
//   part of the destination register group. Otherwise, it is illegal.

// Widening vector-vector: 2*SEW result (wvrclass) from two SEW sources.
multiclass VPseudoBinaryW_VV<LMULInfo m> {
  defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
                           "@earlyclobber $rd">;
}

// Widening vector-scalar (GPR).
multiclass VPseudoBinaryW_VX<LMULInfo m> {
  defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
                             "@earlyclobber $rd">;
}

// Widening vector-scalar (FPR).
multiclass VPseudoBinaryW_VF<LMULInfo m, FPR_Info f> {
  defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
                                   f.fprclass, m,
                                   "@earlyclobber $rd">;
}

// Widening with an already-wide first operand (w+v). Also emits _TIED
// variants where the destination is tied to the wide source.
multiclass VPseudoBinaryW_WV<LMULInfo m> {
  defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                           "@earlyclobber $rd">;
  defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
                               "@earlyclobber $rd">;
}

// w+x / w+f: destination and first source are both wide (same EEW), so no
// earlyclobber is needed.
multiclass VPseudoBinaryW_WX<LMULInfo m> {
  defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
}

multiclass VPseudoBinaryW_WF<LMULInfo m, FPR_Info f> {
  defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
                                   f.fprclass, m>;
}
2090
// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
// if the source and destination have an LMUL<=1. This matches this overlap
// exception from the spec.
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group."
// (m.octuple >= 8 corresponds to LMUL >= 1 for the narrow destination, i.e.
// wide-source LMUL > 1, where the constraint is required.)
multiclass VPseudoBinaryV_WV<LMULInfo m> {
  defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryV_WX<LMULInfo m> {
  defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryV_WI<LMULInfo m> {
  defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
2110
// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
// Destination class selection: carry-out ops write a mask (VR); carry-in,
// non-carry-out ops may not write v0 (GetVRegNoV0); otherwise the plain
// vector class is used.
multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarryIn<!if(CarryOut, VR,
                         !if(!and(CarryIn, !not(CarryOut)),
                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                         m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}

// _TU variant of the above: destination tied to a passthru operand so
// codegen can control the tail elements.
multiclass VPseudoTiedBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                                 string Constraint = ""> {
  def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU" :
    VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
                             !if(!and(CarryIn, !not(CarryOut)),
                                 GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                             m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}

// Scalar (GPR) second-operand forms of the carry-in pseudos.
multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarryIn<!if(CarryOut, VR,
                         !if(!and(CarryIn, !not(CarryOut)),
                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                         m.vrclass, GPR, m, CarryIn, Constraint>;
}

multiclass VPseudoTiedBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                                 string Constraint = ""> {
  def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
    VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
                             !if(!and(CarryIn, !not(CarryOut)),
                                 GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                             m.vrclass, GPR, m, CarryIn, Constraint>;
}
2149
// vfmerge.vfm pseudos: FP scalar merged under the v0 mask. Destination may
// not be v0 (GetVRegNoV0). Emits a plain and a tied (_TU) variant per FP
// type and legal LMUL.
multiclass VPseudoVMRG_FM {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMergeV_MX = !cast<SchedWrite>("WriteVFMergeV_" # mx);
      defvar ReadVFMergeV_MX = !cast<SchedRead>("ReadVFMergeV_" # mx);
      defvar ReadVFMergeF_MX = !cast<SchedRead>("ReadVFMergeF_" # mx);

      def "_V" # f.FX # "M_" # mx :
        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
        Sched<[WriteVFMergeV_MX, ReadVFMergeV_MX, ReadVFMergeF_MX, ReadVMask]>;
      // Tied version to allow codegen control over the tail elements
      def "_V" # f.FX # "M_" # mx # "_TU":
        VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                                 m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
        Sched<[WriteVFMergeV_MX, ReadVFMergeV_MX, ReadVFMergeF_MX, ReadVMask]>;
    }
  }
}
2170
// Immediate (simm5) second-operand forms of the carry-in pseudos; same
// destination-class selection as VPseudoBinaryV_VM.
multiclass VPseudoBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarryIn<!if(CarryOut, VR,
                         !if(!and(CarryIn, !not(CarryOut)),
                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                         m.vrclass, simm5, m, CarryIn, Constraint>;
}

// Tied (_TU) variant with a passthru for tail control.
multiclass VPseudoTiedBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                                 string Constraint = ""> {
  def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
    VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
                             !if(!and(CarryIn, !not(CarryOut)),
                                 GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                             m.vrclass, simm5, m, CarryIn, Constraint>;
}
2188
// vmv.v.v / vmv.v.x / vmv.v.i pseudos: for each LMUL, a tail-agnostic and
// a tail-undisturbed (_TU) variant of each source kind (vector, GPR,
// simm5). These are unmasked moves, so the NoDummyMask classes are used.
//
// Note: the original had a second, redundant nested `let VLMul = m.value`
// around the defs; the outer `let` already applies to everything in the
// loop body, so the inner one is removed. Generated records are unchanged.
multiclass VPseudoUnaryVMV_V_X_I {
  foreach m = MxList in {
    let VLMul = m.value in {
      defvar mx = m.MX;
      defvar WriteVIMovV_MX = !cast<SchedWrite>("WriteVIMovV_" # mx);
      defvar WriteVIMovX_MX = !cast<SchedWrite>("WriteVIMovX_" # mx);
      defvar WriteVIMovI_MX = !cast<SchedWrite>("WriteVIMovI_" # mx);
      defvar ReadVIMovV_MX = !cast<SchedRead>("ReadVIMovV_" # mx);
      defvar ReadVIMovX_MX = !cast<SchedRead>("ReadVIMovX_" # mx);

      def "_V_" # mx : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>,
                       Sched<[WriteVIMovV_MX, ReadVIMovV_MX]>;
      def "_X_" # mx : VPseudoUnaryNoDummyMask<m.vrclass, GPR>,
                       Sched<[WriteVIMovX_MX, ReadVIMovX_MX]>;
      def "_I_" # mx : VPseudoUnaryNoDummyMask<m.vrclass, simm5>,
                       Sched<[WriteVIMovI_MX]>;
      def "_V_" # mx # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, m.vrclass>,
                              Sched<[WriteVIMovV_MX, ReadVIMovV_MX]>;
      def "_X_" # mx # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, GPR>,
                              Sched<[WriteVIMovX_MX, ReadVIMovX_MX]>;
      def "_I_" # mx # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, simm5>,
                              Sched<[WriteVIMovI_MX]>;
    }
  }
}
2216
// vfmv.v.f pseudos: FP scalar to vector, plain and _TU variants per FP
// type and legal LMUL.
multiclass VPseudoVMV_F {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMovV_MX = !cast<SchedWrite>("WriteVFMovV_" # mx);
      defvar ReadVFMovF_MX = !cast<SchedRead>("ReadVFMovF_" # mx);

      let VLMul = m.value in {
        def "_" # f.FX # "_" # mx :
          VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>,
          Sched<[WriteVFMovV_MX, ReadVFMovF_MX]>;
        def "_" # f.FX # "_" # mx # "_TU":
          VPseudoUnaryNoDummyMaskTU<m.vrclass, f.fprclass>,
          Sched<[WriteVFMovV_MX, ReadVFMovF_MX]>;
      }
    }
  }
}
2235
// Unary FP vector pseudos (vfclass.v; VFClassV sched resources): unmasked,
// _TU and _MASK variants per FP LMUL.
multiclass VPseudoVCLS_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFClassV_MX = !cast<SchedWrite>("WriteVFClassV_" # mx);
    defvar ReadVFClassV_MX = !cast<SchedRead>("ReadVFClassV_" # mx);

    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                       Sched<[WriteVFClassV_MX, ReadVFClassV_MX, ReadVMask]>;
      def "_V_" # mx # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
                              Sched<[WriteVFClassV_MX, ReadVFClassV_MX, ReadVMask]>;
      def "_V_" # mx # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
                                 RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                 Sched<[WriteVFClassV_MX, ReadVFClassV_MX, ReadVMask]>;
    }
  }
}

// Same structure as VPseudoVCLS_V on the VFSqrtV (vfsqrt.v) resources.
multiclass VPseudoVSQR_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFSqrtV_MX = !cast<SchedWrite>("WriteVFSqrtV_" # mx);
    defvar ReadVFSqrtV_MX = !cast<SchedRead>("ReadVFSqrtV_" # mx);

    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                       Sched<[WriteVFSqrtV_MX, ReadVFSqrtV_MX, ReadVMask]>;
      def "_V_" # mx # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
                              Sched<[WriteVFSqrtV_MX, ReadVFSqrtV_MX, ReadVMask]>;
      def "_V_" # mx # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
                                 RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                 Sched<[WriteVFSqrtV_MX, ReadVFSqrtV_MX, ReadVMask]>;
    }
  }
}

// Same structure on the VFRecpV resources (reciprocal-estimate family,
// e.g. vfrec7/vfrsqrt7 — confirm against the instantiation sites).
multiclass VPseudoVRCP_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFRecpV_MX = !cast<SchedWrite>("WriteVFRecpV_" # mx);
    defvar ReadVFRecpV_MX = !cast<SchedRead>("ReadVFRecpV_" # mx);

    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                         Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
      def "_V_" # mx # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
                              Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
      def "_V_" # mx # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
                                 RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                 Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
    }
  }
}
2289
// Integer extension pseudos (vzext/vsext .vf2/.vf4/.vf8). The source uses
// a fractional register class (f2/f4/f8vrclass); since destination EEW >
// source EEW, the destination gets @earlyclobber. Each factor restricts
// the LMUL list (MxListVF2/4/8) so the source EMUL stays legal.
multiclass PseudoVEXT_VF2 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF2 in
  {
    defvar mx = m.MX;
    defvar WriteVExtV_MX = !cast<SchedWrite>("WriteVExtV_" # mx);
    defvar ReadVExtV_MX = !cast<SchedRead>("ReadVExtV_" # mx);

    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>,
                     Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f2vrclass, constraints>,
                            Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMaskTA<m.vrclass, m.f2vrclass, constraints>,
        RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
        Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
    }
  }
}

// As PseudoVEXT_VF2 with a quarter-width source (vf4).
multiclass PseudoVEXT_VF4 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF4 in
  {
    defvar mx = m.MX;
    defvar WriteVExtV_MX = !cast<SchedWrite>("WriteVExtV_" # mx);
    defvar ReadVExtV_MX = !cast<SchedRead>("ReadVExtV_" # mx);

    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>,
                     Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f4vrclass, constraints>,
                            Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMaskTA<m.vrclass, m.f4vrclass, constraints>,
        RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
        Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
    }
  }
}

// As PseudoVEXT_VF2 with an eighth-width source (vf8).
multiclass PseudoVEXT_VF8 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF8 in
  {
    defvar mx = m.MX;
    defvar WriteVExtV_MX = !cast<SchedWrite>("WriteVExtV_" # mx);
    defvar ReadVExtV_MX = !cast<SchedRead>("ReadVExtV_" # mx);

    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>,
                     Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f8vrclass, constraints>,
                            Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMaskTA<m.vrclass, m.f8vrclass, constraints>,
        RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
        Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
    }
  }
}
2352
// The destination EEW is 1 since "For the purposes of register group overlap
// constraints, mask elements have EEW=1."
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW is different from source EEW, we need to use
// @earlyclobber to avoid the overlap between destination and source registers.
// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
// exception from the spec
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group".
// With LMUL<=1 the source and dest occupy a single register so any overlap
// is in the lowest-numbered part.
// (m.octuple >= 16 corresponds to LMUL > 1, where the constraint applies.)
multiclass VPseudoBinaryM_VV<LMULInfo m> {
  defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryM_VX<LMULInfo m> {
  defm "_VX" :
    VPseudoBinaryM<VR, m.vrclass, GPR, m,
                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryM_VF<LMULInfo m, FPR_Info f> {
  defm "_V" # f.FX :
    VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryM_VI<LMULInfo m> {
  defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
2385
// Gather pseudos in all three source forms (vv/vx/vi) for every LMUL,
// wired to the VGather sched resources.
multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVGatherV_MX = !cast<SchedWrite>("WriteVGatherV_" # mx);
    defvar WriteVGatherX_MX = !cast<SchedWrite>("WriteVGatherX_" # mx);
    defvar WriteVGatherI_MX = !cast<SchedWrite>("WriteVGatherI_" # mx);
    defvar ReadVGatherV_MX = !cast<SchedRead>("ReadVGatherV_" # mx);
    defvar ReadVGatherX_MX = !cast<SchedRead>("ReadVGatherX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
              Sched<[WriteVGatherV_MX, ReadVGatherV_MX, ReadVGatherV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
              Sched<[WriteVGatherX_MX, ReadVGatherV_MX, ReadVGatherX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
              Sched<[WriteVGatherI_MX, ReadVGatherV_MX, ReadVMask]>;
  }
}
2403
// VSALU pseudos: VV, VX and VI variants per LMUL, using the VSALU scheduling
// resources. Sched list order is [write, vector-read(s), scalar-read,
// mask-read], matching the pseudo's operand order.
multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSALUV_MX = !cast<SchedWrite>("WriteVSALUV_" # mx);
    defvar WriteVSALUX_MX = !cast<SchedWrite>("WriteVSALUX_" # mx);
    defvar WriteVSALUI_MX = !cast<SchedWrite>("WriteVSALUI_" # mx);
    defvar ReadVSALUV_MX = !cast<SchedRead>("ReadVSALUV_" # mx);
    defvar ReadVSALUX_MX = !cast<SchedRead>("ReadVSALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
              Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
              Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
              Sched<[WriteVSALUI_MX, ReadVSALUV_MX, ReadVMask]>;
  }
}
2421
2422
// Shift pseudos: VV, VX and VI variants per LMUL on the VShift scheduling
// resources.
multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVShiftV_MX = !cast<SchedWrite>("WriteVShiftV_" # mx);
    defvar WriteVShiftX_MX = !cast<SchedWrite>("WriteVShiftX_" # mx);
    defvar WriteVShiftI_MX = !cast<SchedWrite>("WriteVShiftI_" # mx);
    defvar ReadVShiftV_MX = !cast<SchedRead>("ReadVShiftV_" # mx);
    defvar ReadVShiftX_MX = !cast<SchedRead>("ReadVShiftX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
              Sched<[WriteVShiftV_MX, ReadVShiftV_MX, ReadVShiftV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
              Sched<[WriteVShiftX_MX, ReadVShiftV_MX, ReadVShiftX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
              Sched<[WriteVShiftI_MX, ReadVShiftV_MX, ReadVMask]>;
  }
}

// Same layout as VPseudoVSHT_VV_VX_VI but on the VSShift scheduling
// resources.
multiclass VPseudoVSSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSShiftV_MX = !cast<SchedWrite>("WriteVSShiftV_" # mx);
    defvar WriteVSShiftX_MX = !cast<SchedWrite>("WriteVSShiftX_" # mx);
    defvar WriteVSShiftI_MX = !cast<SchedWrite>("WriteVSShiftI_" # mx);
    defvar ReadVSShiftV_MX = !cast<SchedRead>("ReadVSShiftV_" # mx);
    defvar ReadVSShiftX_MX = !cast<SchedRead>("ReadVSShiftX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
              Sched<[WriteVSShiftV_MX, ReadVSShiftV_MX, ReadVSShiftV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
              Sched<[WriteVSShiftX_MX, ReadVSShiftV_MX, ReadVSShiftX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
              Sched<[WriteVSShiftI_MX, ReadVSShiftV_MX, ReadVMask]>;
  }
}
2458
// Integer ALU pseudos: VV, VX and VI variants per LMUL on the VIALU
// scheduling resources.
multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
    defvar WriteVIALUI_MX = !cast<SchedWrite>("WriteVIALUI_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
            Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
            Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
            Sched<[WriteVIALUI_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}
2476
// VSALU pseudos with only VV and VX variants (no immediate form).
multiclass VPseudoVSALU_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSALUV_MX = !cast<SchedWrite>("WriteVSALUV_" # mx);
    defvar WriteVSALUX_MX = !cast<SchedWrite>("WriteVSALUX_" # mx);
    defvar ReadVSALUV_MX = !cast<SchedRead>("ReadVSALUV_" # mx);
    defvar ReadVSALUX_MX = !cast<SchedRead>("ReadVSALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>;
  }
}

// VV and VX pseudos on the VSMul scheduling resources.
multiclass VPseudoVSMUL_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSMulV_MX = !cast<SchedWrite>("WriteVSMulV_" # mx);
    defvar WriteVSMulX_MX = !cast<SchedWrite>("WriteVSMulX_" # mx);
    defvar ReadVSMulV_MX = !cast<SchedRead>("ReadVSMulV_" # mx);
    defvar ReadVSMulX_MX = !cast<SchedRead>("ReadVSMulX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVSMulV_MX, ReadVSMulV_MX, ReadVSMulV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVSMulX_MX, ReadVSMulV_MX, ReadVSMulX_MX, ReadVMask]>;
  }
}

// VV and VX pseudos on the VAALU scheduling resources.
multiclass VPseudoVAALU_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVAALUV_MX = !cast<SchedWrite>("WriteVAALUV_" # mx);
    defvar WriteVAALUX_MX = !cast<SchedWrite>("WriteVAALUX_" # mx);
    defvar ReadVAALUV_MX = !cast<SchedRead>("ReadVAALUV_" # mx);
    defvar ReadVAALUX_MX = !cast<SchedRead>("ReadVAALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m>,
              Sched<[WriteVAALUV_MX, ReadVAALUV_MX, ReadVAALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVAALUX_MX, ReadVAALUV_MX, ReadVAALUX_MX, ReadVMask]>;
  }
}
2521
// Integer min/max pseudos (VV, VX) — these share the VICmp scheduling
// resources.
multiclass VPseudoVMINMAX_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICmpV_MX = !cast<SchedWrite>("WriteVICmpV_" # mx);
    defvar WriteVICmpX_MX = !cast<SchedWrite>("WriteVICmpX_" # mx);
    defvar ReadVICmpV_MX = !cast<SchedRead>("ReadVICmpV_" # mx);
    defvar ReadVICmpX_MX = !cast<SchedRead>("ReadVICmpX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVICmpV_MX, ReadVICmpV_MX, ReadVICmpV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVICmpX_MX, ReadVICmpV_MX, ReadVICmpX_MX, ReadVMask]>;
  }
}

// Integer multiply pseudos (VV, VX) on the VIMul scheduling resources.
multiclass VPseudoVMUL_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIMulV_MX = !cast<SchedWrite>("WriteVIMulV_" # mx);
    defvar WriteVIMulX_MX = !cast<SchedWrite>("WriteVIMulX_" # mx);
    defvar ReadVIMulV_MX = !cast<SchedRead>("ReadVIMulV_" # mx);
    defvar ReadVIMulX_MX = !cast<SchedRead>("ReadVIMulX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVIMulV_MX, ReadVIMulV_MX, ReadVIMulV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVIMulX_MX, ReadVIMulV_MX, ReadVIMulX_MX, ReadVMask]>;
  }
}

// Integer divide/remainder pseudos (VV, VX) on the VIDiv scheduling
// resources.
multiclass VPseudoVDIV_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIDivV_MX = !cast<SchedWrite>("WriteVIDivV_" # mx);
    defvar WriteVIDivX_MX = !cast<SchedWrite>("WriteVIDivX_" # mx);
    defvar ReadVIDivV_MX = !cast<SchedRead>("ReadVIDivV_" # mx);
    defvar ReadVIDivX_MX = !cast<SchedRead>("ReadVIDivX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVIDivV_MX, ReadVIDivV_MX, ReadVIDivV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVIDivX_MX, ReadVIDivV_MX, ReadVIDivX_MX, ReadVMask]>;
  }
}
2566
// FP multiply pseudos. VV variants iterate MxListF; VF variants iterate per
// FP register class (FPList) over that class's legal LMULs (f.MxList).
multiclass VPseudoVFMUL_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFMulV_MX = !cast<SchedWrite>("WriteVFMulV_" # mx);
    defvar ReadVFMulV_MX = !cast<SchedRead>("ReadVFMulV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFMulV_MX, ReadVFMulV_MX, ReadVFMulV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMulF_MX = !cast<SchedWrite>("WriteVFMulF_" # mx);
      defvar ReadVFMulV_MX = !cast<SchedRead>("ReadVFMulV_" # mx);
      defvar ReadVFMulF_MX = !cast<SchedRead>("ReadVFMulF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFMulF_MX, ReadVFMulV_MX, ReadVFMulF_MX, ReadVMask]>;
    }
  }
}

// FP divide pseudos; same structure as VPseudoVFMUL_VV_VF but on the VFDiv
// scheduling resources.
multiclass VPseudoVFDIV_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFDivV_MX = !cast<SchedWrite>("WriteVFDivV_" # mx);
    defvar ReadVFDivV_MX = !cast<SchedRead>("ReadVFDivV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFDivV_MX, ReadVFDivV_MX, ReadVFDivV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFDivF_MX = !cast<SchedWrite>("WriteVFDivF_" # mx);
      defvar ReadVFDivV_MX = !cast<SchedRead>("ReadVFDivV_" # mx);
      defvar ReadVFDivF_MX = !cast<SchedRead>("ReadVFDivF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFDivF_MX, ReadVFDivV_MX, ReadVFDivF_MX, ReadVMask]>;
    }
  }
}
2612
// Reverse FP divide pseudos: VF form only, one set per FP register class
// (FPList) and per legal LMUL for that class (f.MxList). Shares the VFDiv
// scheduling resources with VPseudoVFDIV_VV_VF.
multiclass VPseudoVFRDIV_VF {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFDivF_MX = !cast<SchedWrite>("WriteVFDivF_" # mx);
      defvar ReadVFDivV_MX = !cast<SchedRead>("ReadVFDivV_" # mx);
      defvar ReadVFDivF_MX = !cast<SchedRead>("ReadVFDivF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFDivF_MX, ReadVFDivV_MX, ReadVFDivF_MX, ReadVMask]>;
    }
  }
}
2625
// Integer ALU pseudos with only VV and VX variants (no immediate form), on
// the VIALU scheduling resources.
multiclass VPseudoVALU_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    // Bug fix: this previously cast "WriteVIALUV_" # mx, giving the VX
    // pseudos the vector-operand write resource. Every sibling multiclass
    // (e.g. VPseudoVALU_VV_VX_VI) uses "WriteVIALUX_" for the X form.
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
            Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
            Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
  }
}
2640
// Sign-injection pseudos (VV over MxListF, VF per FP register class) on the
// VFSgnj scheduling resources.
multiclass VPseudoVSGNJ_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFSgnjV_MX = !cast<SchedWrite>("WriteVFSgnjV_" # mx);
    defvar ReadVFSgnjV_MX = !cast<SchedRead>("ReadVFSgnjV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFSgnjV_MX, ReadVFSgnjV_MX, ReadVFSgnjV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFSgnjF_MX = !cast<SchedWrite>("WriteVFSgnjF_" # mx);
      defvar ReadVFSgnjV_MX = !cast<SchedRead>("ReadVFSgnjV_" # mx);
      defvar ReadVFSgnjF_MX = !cast<SchedRead>("ReadVFSgnjF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFSgnjF_MX, ReadVFSgnjV_MX, ReadVFSgnjF_MX, ReadVMask]>;
    }
  }
}

// FP min/max pseudos (VV, VF) — these share the VFCmp scheduling resources.
multiclass VPseudoVMAX_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCmpV_MX = !cast<SchedWrite>("WriteVFCmpV_" # mx);
    defvar ReadVFCmpV_MX = !cast<SchedRead>("ReadVFCmpV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFCmpV_MX, ReadVFCmpV_MX, ReadVFCmpV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFCmpF_MX = !cast<SchedWrite>("WriteVFCmpF_" # mx);
      defvar ReadVFCmpV_MX = !cast<SchedRead>("ReadVFCmpV_" # mx);
      defvar ReadVFCmpF_MX = !cast<SchedRead>("ReadVFCmpF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFCmpF_MX, ReadVFCmpV_MX, ReadVFCmpF_MX, ReadVMask]>;
    }
  }
}
2686
// FP ALU pseudos: VV and VF variants on the VFALU scheduling resources.
multiclass VPseudoVALU_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFALUV_MX = !cast<SchedWrite>("WriteVFALUV_" # mx);
    defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFALUV_MX, ReadVFALUV_MX, ReadVFALUV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFALUF_MX = !cast<SchedWrite>("WriteVFALUF_" # mx);
      defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
      defvar ReadVFALUF_MX = !cast<SchedRead>("ReadVFALUF_" # mx);
      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>;
    }
  }
}

// FP ALU pseudos with only the VF (scalar) variant.
multiclass VPseudoVALU_VF {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFALUF_MX = !cast<SchedWrite>("WriteVFALUF_" # mx);
      defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
      defvar ReadVFALUF_MX = !cast<SchedRead>("ReadVFALUF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>;
    }
  }
}
2722
// Integer ALU pseudos with only VX and VI variants (no vector-vector form).
multiclass VPseudoVALU_VX_VI<Operand ImmType = simm5> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
    defvar WriteVIALUI_MX = !cast<SchedWrite>("WriteVIALUI_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);

    defm "" : VPseudoBinaryV_VX<m>,
            Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m>,
            Sched<[WriteVIALUI_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}
2737
// Widening integer ALU pseudos (VV, VX). Iterates MxListW — presumably only
// the LMULs that have a wider counterpart; confirm against MxListW's
// definition.
multiclass VPseudoVWALU_VV_VX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWALUV_MX = !cast<SchedWrite>("WriteVIWALUV_" # mx);
    defvar WriteVIWALUX_MX = !cast<SchedWrite>("WriteVIWALUX_" # mx);
    defvar ReadVIWALUV_MX = !cast<SchedRead>("ReadVIWALUV_" # mx);
    defvar ReadVIWALUX_MX = !cast<SchedRead>("ReadVIWALUX_" # mx);

    defm "" : VPseudoBinaryW_VV<m>,
            Sched<[WriteVIWALUV_MX, ReadVIWALUV_MX, ReadVIWALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryW_VX<m>,
            Sched<[WriteVIWALUX_MX, ReadVIWALUV_MX, ReadVIWALUX_MX, ReadVMask]>;
  }
}

// Widening integer multiply pseudos (VV, VX) on the VIWMul scheduling
// resources.
multiclass VPseudoVWMUL_VV_VX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWMulV_MX = !cast<SchedWrite>("WriteVIWMulV_" # mx);
    defvar WriteVIWMulX_MX = !cast<SchedWrite>("WriteVIWMulX_" # mx);
    defvar ReadVIWMulV_MX = !cast<SchedRead>("ReadVIWMulV_" # mx);
    defvar ReadVIWMulX_MX = !cast<SchedRead>("ReadVIWMulX_" # mx);

    defm "" : VPseudoBinaryW_VV<m>,
              Sched<[WriteVIWMulV_MX, ReadVIWMulV_MX, ReadVIWMulV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryW_VX<m>,
              Sched<[WriteVIWMulX_MX, ReadVIWMulV_MX, ReadVIWMulX_MX, ReadVMask]>;
  }
}
2767
// Widening FP multiply pseudos: VV over MxListFW, VF per widening-capable FP
// register class (FPListW / f.MxListFW).
multiclass VPseudoVWMUL_VV_VF {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWMulV_MX = !cast<SchedWrite>("WriteVFWMulV_" # mx);
    defvar ReadVFWMulV_MX = !cast<SchedRead>("ReadVFWMulV_" # mx);

    defm "" : VPseudoBinaryW_VV<m>,
              Sched<[WriteVFWMulV_MX, ReadVFWMulV_MX, ReadVFWMulV_MX, ReadVMask]>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defvar WriteVFWMulF_MX = !cast<SchedWrite>("WriteVFWMulF_" # mx);
      defvar ReadVFWMulV_MX = !cast<SchedRead>("ReadVFWMulV_" # mx);
      defvar ReadVFWMulF_MX = !cast<SchedRead>("ReadVFWMulF_" # mx);

      defm "" : VPseudoBinaryW_VF<m, f>,
                Sched<[WriteVFWMulF_MX, ReadVFWMulV_MX, ReadVFWMulF_MX, ReadVMask]>;
    }
  }
}
2790
// Widening integer ALU pseudos with a wide first source (WV, WX forms).
multiclass VPseudoVWALU_WV_WX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWALUV_MX = !cast<SchedWrite>("WriteVIWALUV_" # mx);
    defvar WriteVIWALUX_MX = !cast<SchedWrite>("WriteVIWALUX_" # mx);
    defvar ReadVIWALUV_MX = !cast<SchedRead>("ReadVIWALUV_" # mx);
    defvar ReadVIWALUX_MX = !cast<SchedRead>("ReadVIWALUX_" # mx);

    defm "" : VPseudoBinaryW_WV<m>,
              Sched<[WriteVIWALUV_MX, ReadVIWALUV_MX, ReadVIWALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryW_WX<m>,
              Sched<[WriteVIWALUX_MX, ReadVIWALUV_MX, ReadVIWALUX_MX, ReadVMask]>;
  }
}
2805
// Widening FP ALU pseudos (VV, VF) on the VFWALU scheduling resources.
multiclass VPseudoVFWALU_VV_VF {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWALUV_MX = !cast<SchedWrite>("WriteVFWALUV_" # mx);
    defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);

    defm "" : VPseudoBinaryW_VV<m>,
              Sched<[WriteVFWALUV_MX, ReadVFWALUV_MX, ReadVFWALUV_MX, ReadVMask]>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defvar WriteVFWALUF_MX = !cast<SchedWrite>("WriteVFWALUF_" # mx);
      defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);
      defvar ReadVFWALUF_MX = !cast<SchedRead>("ReadVFWALUF_" # mx);

      defm "" : VPseudoBinaryW_VF<m, f>,
                Sched<[WriteVFWALUF_MX, ReadVFWALUV_MX, ReadVFWALUF_MX, ReadVMask]>;
    }
  }
}

// Widening FP ALU pseudos with a wide first source (WV, WF forms).
multiclass VPseudoVFWALU_WV_WF {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWALUV_MX = !cast<SchedWrite>("WriteVFWALUV_" # mx);
    defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);

    defm "" : VPseudoBinaryW_WV<m>,
              Sched<[WriteVFWALUV_MX, ReadVFWALUV_MX, ReadVFWALUV_MX, ReadVMask]>;
  }
  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defvar WriteVFWALUF_MX = !cast<SchedWrite>("WriteVFWALUF_" # mx);
      defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);
      defvar ReadVFWALUF_MX = !cast<SchedRead>("ReadVFWALUF_" # mx);

      defm "" : VPseudoBinaryW_WF<m, f>,
                Sched<[WriteVFWALUF_MX, ReadVFWALUV_MX, ReadVFWALUF_MX, ReadVMask]>;
    }
  }
}
2850
// Merge pseudos (VM, XM, IM forms) plus tied variants whose destination is
// tied to a source so codegen can control the tail elements.
multiclass VPseudoVMRG_VM_XM_IM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIMergeV_MX = !cast<SchedWrite>("WriteVIMergeV_" # mx);
    defvar WriteVIMergeX_MX = !cast<SchedWrite>("WriteVIMergeX_" # mx);
    defvar WriteVIMergeI_MX = !cast<SchedWrite>("WriteVIMergeI_" # mx);
    defvar ReadVIMergeV_MX = !cast<SchedRead>("ReadVIMergeV_" # mx);
    defvar ReadVIMergeX_MX = !cast<SchedRead>("ReadVIMergeX_" # mx);

    defm "" : VPseudoBinaryV_VM<m>,
              Sched<[WriteVIMergeV_MX, ReadVIMergeV_MX, ReadVIMergeV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_XM<m>,
              Sched<[WriteVIMergeX_MX, ReadVIMergeV_MX, ReadVIMergeX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_IM<m>,
              Sched<[WriteVIMergeI_MX, ReadVIMergeV_MX, ReadVMask]>;
    // Tied versions to allow codegen control over the tail elements
    defm "" : VPseudoTiedBinaryV_VM<m>,
              Sched<[WriteVIMergeV_MX, ReadVIMergeV_MX, ReadVIMergeV_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_XM<m>,
              Sched<[WriteVIMergeX_MX, ReadVIMergeV_MX, ReadVIMergeX_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_IM<m>,
              Sched<[WriteVIMergeI_MX, ReadVIMergeV_MX, ReadVMask]>;
  }
}
2875
// Carry-consuming ALU pseudos (VM, XM, IM forms) plus tied variants for tail
// control, on the VICALU scheduling resources.
multiclass VPseudoVCALU_VM_XM_IM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar WriteVICALUI_MX = !cast<SchedWrite>("WriteVICALUI_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_XM<m>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_IM<m>,
              Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>;
    // Tied versions to allow codegen control over the tail elements
    defm "" : VPseudoTiedBinaryV_VM<m>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_XM<m>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_IM<m>,
              Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>;
  }
}

// As above but without the immediate (IM) form.
multiclass VPseudoVCALU_VM_XM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_XM<m>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
    // Tied versions to allow codegen control over the tail elements
    defm "" : VPseudoTiedBinaryV_VM<m>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_XM<m>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
  }
}
2920
// Mask-result carry ALU pseudos with both carry-in and carry-out
// (CarryOut=1, CarryIn=1); VM, XM and IM forms.
multiclass VPseudoVCALUM_VM_XM_IM<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar WriteVICALUI_MX = !cast<SchedWrite>("WriteVICALUI_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, /*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_XM<m, /*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_IM<m, /*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
              Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>;
  }
}

// As above but without the immediate (IM) form.
multiclass VPseudoVCALUM_VM_XM<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, /*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_XM<m, /*CarryOut=*/1, /*CarryIn=*/1, Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
  }
}
2953
// Mask-result carry ALU pseudos with carry-out only (CarryOut=1, CarryIn=0).
// These variants have no mask operand, so their Sched lists omit ReadVMask.
multiclass VPseudoVCALUM_V_X_I<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar WriteVICALUI_MX = !cast<SchedWrite>("WriteVICALUI_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, /*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX]>;
    defm "" : VPseudoBinaryV_XM<m, /*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX]>;
    defm "" : VPseudoBinaryV_IM<m, /*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
              Sched<[WriteVICALUI_MX, ReadVICALUV_MX]>;
  }
}

// As above but without the immediate (I) form.
multiclass VPseudoVCALUM_V_X<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, /*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX]>;
    defm "" : VPseudoBinaryV_XM<m, /*CarryOut=*/1, /*CarryIn=*/0, Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX]>;
  }
}
2986
// Narrowing clip pseudos (WV, WX, WI forms) on the VNClip scheduling
// resources; iterates MxListW.
multiclass VPseudoVNCLP_WV_WX_WI {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVNClipV_MX = !cast<SchedWrite>("WriteVNClipV_" # mx);
    defvar WriteVNClipX_MX = !cast<SchedWrite>("WriteVNClipX_" # mx);
    defvar WriteVNClipI_MX = !cast<SchedWrite>("WriteVNClipI_" # mx);
    defvar ReadVNClipV_MX = !cast<SchedRead>("ReadVNClipV_" # mx);
    defvar ReadVNClipX_MX = !cast<SchedRead>("ReadVNClipX_" # mx);

    defm "" : VPseudoBinaryV_WV<m>,
              Sched<[WriteVNClipV_MX, ReadVNClipV_MX, ReadVNClipV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WX<m>,
              Sched<[WriteVNClipX_MX, ReadVNClipV_MX, ReadVNClipX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WI<m>,
              Sched<[WriteVNClipI_MX, ReadVNClipV_MX, ReadVMask]>;
  }
}

// Narrowing shift pseudos (WV, WX, WI forms) on the VNShift scheduling
// resources.
multiclass VPseudoVNSHT_WV_WX_WI {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVNShiftV_MX = !cast<SchedWrite>("WriteVNShiftV_" # mx);
    defvar WriteVNShiftX_MX = !cast<SchedWrite>("WriteVNShiftX_" # mx);
    defvar WriteVNShiftI_MX = !cast<SchedWrite>("WriteVNShiftI_" # mx);
    defvar ReadVNShiftV_MX = !cast<SchedRead>("ReadVNShiftV_" # mx);
    defvar ReadVNShiftX_MX = !cast<SchedRead>("ReadVNShiftX_" # mx);

    defm "" : VPseudoBinaryV_WV<m>,
              Sched<[WriteVNShiftV_MX, ReadVNShiftV_MX, ReadVNShiftV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WX<m>,
              Sched<[WriteVNShiftX_MX, ReadVNShiftV_MX, ReadVNShiftX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WI<m>,
              Sched<[WriteVNShiftI_MX, ReadVNShiftV_MX, ReadVMask]>;
  }
}
3022
// Base ternary pseudo: unmasked variant plus a "_MASK" variant built from
// VPseudoBinaryMask.
multiclass VPseudoTernary<VReg RetClass,
                          RegisterClass Op1Class,
                          DAGOperand Op2Class,
                          LMULInfo MInfo,
                          string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
  }
}

// Ternary pseudo whose masked variant carries a policy operand
// (VPseudoBinaryMaskPolicy) while the unmasked variant has none.
multiclass VPseudoTernaryNoMaskNoPolicy<VReg RetClass,
                                        RegisterClass Op1Class,
                                        DAGOperand Op2Class,
                                        LMULInfo MInfo,
                                        string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
                                                           Constraint>;

  }
}

// Ternary pseudo where both variants carry a policy operand; Commutable
// marks the unmasked variant isCommutable for the optimizer.
multiclass VPseudoTernaryWithPolicy<VReg RetClass,
                                    RegisterClass Op1Class,
                                    DAGOperand Op2Class,
                                    LMULInfo MInfo,
                                    string Constraint = "",
                                    bit Commutable = 0> {
  let VLMul = MInfo.value in {
    let isCommutable = Commutable in
    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>;
  }
}
3059
// Commutable vector-vector ternary (multiply-add style operand classes).
multiclass VPseudoTernaryV_VV_AAXA<LMULInfo m, string Constraint = ""> {
  defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
                                      Constraint, /*Commutable*/1>;
}

// Slide ternary with a GPR offset operand; not commutable.
multiclass VPseudoVSLDV_VX<LMULInfo m, string Constraint = ""> {
  defm _VX : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

// Commutable scalar(GPR)-vector ternary.
multiclass VPseudoTernaryV_VX_AAXA<LMULInfo m, string Constraint = ""> {
  defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
                                        Constraint, /*Commutable*/1>;
}

// Commutable FP-scalar-vector ternary; suffix encodes the FP register class.
multiclass VPseudoTernaryV_VF_AAXA<LMULInfo m, FPR_Info f, string Constraint = ""> {
  defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
                                              m.vrclass, m, Constraint,
                                              /*Commutable*/1>;
}

// Widening ternaries: wide destination (m.wvrclass), so the destination is
// always @earlyclobber.
multiclass VPseudoTernaryW_VV<LMULInfo m> {
  defvar constraint = "@earlyclobber $rd";
  defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
                                      constraint>;
}

multiclass VPseudoTernaryW_VX<LMULInfo m> {
  defvar constraint = "@earlyclobber $rd";
  defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
                                        constraint>;
}

multiclass VPseudoTernaryW_VF<LMULInfo m, FPR_Info f> {
  defvar constraint = "@earlyclobber $rd";
  defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
                                              m.vrclass, m, constraint>;
}

// Slide ternary with an immediate offset operand.
multiclass VPseudoVSLDV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
3101
// Integer multiply-add pseudos, .vv and .vx forms, instantiated for every
// LMUL with the matching per-LMUL scheduling resources.
multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
  foreach m = MxList in {
    defvar sfx = m.MX;
    defvar WrV = !cast<SchedWrite>("WriteVIMulAddV_" # sfx);
    defvar WrX = !cast<SchedWrite>("WriteVIMulAddX_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVIMulAddV_" # sfx);
    defvar RdX = !cast<SchedRead>("ReadVIMulAddX_" # sfx);

    defm "" : VPseudoTernaryV_VV_AAXA<m, Constraint>,
              Sched<[WrV, RdV, RdV, RdV, ReadVMask]>;
    defm "" : VPseudoTernaryV_VX_AAXA<m, Constraint>,
              Sched<[WrX, RdV, RdV, RdX, ReadVMask]>;
  }
}
3118
// FP multiply-add pseudos: .vv form over every FP LMUL, then .vf forms per
// scalar FP type over that type's legal LMULs.
multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
  foreach m = MxListF in {
    defvar sfx = m.MX;
    defvar WrV = !cast<SchedWrite>("WriteVFMulAddV_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVFMulAddV_" # sfx);

    defm "" : VPseudoTernaryV_VV_AAXA<m, Constraint>,
              Sched<[WrV, RdV, RdV, RdV, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar sfx = m.MX;
      defvar WrF = !cast<SchedWrite>("WriteVFMulAddF_" # sfx);
      defvar RdV = !cast<SchedRead>("ReadVFMulAddV_" # sfx);
      defvar RdF = !cast<SchedRead>("ReadVFMulAddF_" # sfx);

      defm "" : VPseudoTernaryV_VF_AAXA<m, f, Constraint>,
                Sched<[WrF, RdV, RdV, RdF, ReadVMask]>;
    }
  }
}
3141
// Slide pseudos with a GPR (.vx) or immediate (.vi) offset, per LMUL.
multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar sfx = m.MX;
    defvar WrX = !cast<SchedWrite>("WriteVISlideX_" # sfx);
    defvar WrI = !cast<SchedWrite>("WriteVISlideI_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVISlideV_" # sfx);
    defvar RdX = !cast<SchedRead>("ReadVISlideX_" # sfx);

    defm "" : VPseudoVSLDV_VX<m, Constraint>,
              Sched<[WrX, RdV, RdV, RdX, ReadVMask]>;
    defm "" : VPseudoVSLDV_VI<ImmType, m, Constraint>,
              Sched<[WrI, RdV, RdV, ReadVMask]>;
  }
}
3157
// Widening integer multiply-add pseudos, .vv and .vx forms, over the LMULs
// that have a double-width counterpart (MxListW).
multiclass VPseudoVWMAC_VV_VX {
  foreach m = MxListW in {
    defvar sfx = m.MX;
    defvar WrV = !cast<SchedWrite>("WriteVIWMulAddV_" # sfx);
    defvar WrX = !cast<SchedWrite>("WriteVIWMulAddX_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVIWMulAddV_" # sfx);
    defvar RdX = !cast<SchedRead>("ReadVIWMulAddX_" # sfx);

    defm "" : VPseudoTernaryW_VV<m>,
              Sched<[WrV, RdV, RdV, RdV, ReadVMask]>;
    defm "" : VPseudoTernaryW_VX<m>,
              Sched<[WrX, RdV, RdV, RdX, ReadVMask]>;
  }
}
3174
// Widening integer multiply-add pseudos, .vx form only, over MxListW.
multiclass VPseudoVWMAC_VX {
  foreach m = MxListW in {
    defvar sfx = m.MX;
    defvar WrX = !cast<SchedWrite>("WriteVIWMulAddX_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVIWMulAddV_" # sfx);
    defvar RdX = !cast<SchedRead>("ReadVIWMulAddX_" # sfx);

    defm "" : VPseudoTernaryW_VX<m>,
              Sched<[WrX, RdV, RdV, RdX, ReadVMask]>;
  }
}
3187
// Widening FP multiply-add pseudos: .vv over widenable FP LMULs, then .vf
// per scalar FP type over that type's widenable LMULs.
multiclass VPseudoVWMAC_VV_VF {
  foreach m = MxListFW in {
    defvar sfx = m.MX;
    defvar WrV = !cast<SchedWrite>("WriteVFWMulAddV_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVFWMulAddV_" # sfx);

    defm "" : VPseudoTernaryW_VV<m>,
              Sched<[WrV, RdV, RdV, RdV, ReadVMask]>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar sfx = m.MX;
      defvar WrF = !cast<SchedWrite>("WriteVFWMulAddF_" # sfx);
      defvar RdV = !cast<SchedRead>("ReadVFWMulAddV_" # sfx);
      defvar RdF = !cast<SchedRead>("ReadVFWMulAddF_" # sfx);

      defm "" : VPseudoTernaryW_VF<m, f>,
                Sched<[WrF, RdV, RdV, RdF, ReadVMask]>;
    }
  }
}
3212
// Integer compares producing a mask: .vv, .vx and .vi forms per LMUL.
multiclass VPseudoVCMPM_VV_VX_VI {
  foreach m = MxList in {
    defvar sfx = m.MX;
    defvar WrV = !cast<SchedWrite>("WriteVICmpV_" # sfx);
    defvar WrX = !cast<SchedWrite>("WriteVICmpX_" # sfx);
    defvar WrI = !cast<SchedWrite>("WriteVICmpI_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVICmpV_" # sfx);
    defvar RdX = !cast<SchedRead>("ReadVICmpX_" # sfx);

    defm "" : VPseudoBinaryM_VV<m>,
              Sched<[WrV, RdV, RdV, ReadVMask]>;
    defm "" : VPseudoBinaryM_VX<m>,
              Sched<[WrX, RdV, RdX, ReadVMask]>;
    defm "" : VPseudoBinaryM_VI<m>,
              Sched<[WrI, RdV, ReadVMask]>;
  }
}
3230
// Integer compares producing a mask: .vv and .vx forms per LMUL.
multiclass VPseudoVCMPM_VV_VX {
  foreach m = MxList in {
    defvar sfx = m.MX;
    defvar WrV = !cast<SchedWrite>("WriteVICmpV_" # sfx);
    defvar WrX = !cast<SchedWrite>("WriteVICmpX_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVICmpV_" # sfx);
    defvar RdX = !cast<SchedRead>("ReadVICmpX_" # sfx);

    defm "" : VPseudoBinaryM_VV<m>,
              Sched<[WrV, RdV, RdV, ReadVMask]>;
    defm "" : VPseudoBinaryM_VX<m>,
              Sched<[WrX, RdV, RdX, ReadVMask]>;
  }
}
3245
// FP compares producing a mask: .vv over every FP LMUL, then .vf per scalar
// FP type over that type's legal LMULs.
multiclass VPseudoVCMPM_VV_VF {
  foreach m = MxListF in {
    defvar sfx = m.MX;
    defvar WrV = !cast<SchedWrite>("WriteVFCmpV_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVFCmpV_" # sfx);

    defm "" : VPseudoBinaryM_VV<m>,
              Sched<[WrV, RdV, RdV, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar sfx = m.MX;
      defvar WrF = !cast<SchedWrite>("WriteVFCmpF_" # sfx);
      defvar RdV = !cast<SchedRead>("ReadVFCmpV_" # sfx);
      defvar RdF = !cast<SchedRead>("ReadVFCmpF_" # sfx);

      defm "" : VPseudoBinaryM_VF<m, f>,
                Sched<[WrF, RdV, RdF, ReadVMask]>;
    }
  }
}
3268
// FP compares producing a mask, .vf forms only, per scalar FP type.
multiclass VPseudoVCMPM_VF {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar sfx = m.MX;
      defvar WrF = !cast<SchedWrite>("WriteVFCmpF_" # sfx);
      defvar RdV = !cast<SchedRead>("ReadVFCmpV_" # sfx);
      defvar RdF = !cast<SchedRead>("ReadVFCmpF_" # sfx);

      defm "" : VPseudoBinaryM_VF<m, f>,
                Sched<[WrF, RdV, RdF, ReadVMask]>;
    }
  }
}
3282
// Integer compares producing a mask: .vx and .vi forms per LMUL.
multiclass VPseudoVCMPM_VX_VI {
  foreach m = MxList in {
    defvar sfx = m.MX;
    defvar WrX = !cast<SchedWrite>("WriteVICmpX_" # sfx);
    defvar WrI = !cast<SchedWrite>("WriteVICmpI_" # sfx);
    defvar RdV = !cast<SchedRead>("ReadVICmpV_" # sfx);
    defvar RdX = !cast<SchedRead>("ReadVICmpX_" # sfx);

    defm "" : VPseudoBinaryM_VX<m>,
              Sched<[WrX, RdV, RdX, ReadVMask]>;
    defm "" : VPseudoBinaryM_VI<m>,
              Sched<[WrI, RdV, ReadVMask]>;
  }
}
3297
// Integer reduction (.vs): scalar operand and result live in an LMUL=1
// register class (V_M1) regardless of the input vector's LMUL.
multiclass VPseudoVRED_VS {
  foreach m = MxList in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV, ReadVIRedV, ReadVMask]>;
  }
}
3304
// Widening integer reduction (.vs): same LMUL=1 scalar/result shape as
// VPseudoVRED_VS, but using the VIWRed scheduling resources.
multiclass VPseudoVWRED_VS {
  foreach m = MxList in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVMask]>;
  }
}
3311
// FP reduction (.vs) over the FP LMULs; LMUL=1 scalar operand and result.
multiclass VPseudoVFRED_VS {
  foreach m = MxListF in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV, ReadVFRedV, ReadVMask]>;
  }
}
3318
// FP reduction (.vs) using the VFRedO ("O"-suffixed) scheduling resources;
// otherwise identical in shape to VPseudoVFRED_VS.
multiclass VPseudoVFREDO_VS {
  foreach m = MxListF in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVMask]>;
  }
}
3325
// Widening FP reduction (.vs) over the FP LMULs; LMUL=1 scalar and result.
multiclass VPseudoVFWRED_VS {
  foreach m = MxListF in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVMask]>;
  }
}
3332
// Unary conversion pseudos: unsuffixed (tail-agnostic), tail-undisturbed
// (_TU), and masked (_MASK) variants. The masked form is tagged with
// RISCVMaskedPseudo, with the mask at operand index 2.
multiclass VPseudoConversion<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
    def "_" # MInfo.MX # "_TU": VPseudoUnaryNoMaskTU<RetClass, Op1Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA<RetClass, Op1Class,
                                                      Constraint>,
                                   RISCVMaskedPseudo</*MaskOpIdx*/ 2>;
  }
}
3345
// Conversion pseudos using the _FRM flavor of the masked unary pseudo;
// only the _MASK variant is defined here.
multiclass VPseudoConversionRM<VReg RetClass,
                               VReg Op1Class,
                               LMULInfo MInfo,
                               string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA_FRM<RetClass, Op1Class,
                                                          Constraint>;
  }
}
3355
// Conversion pseudos using the NoExcept flavor of the masked unary pseudo;
// only the _MASK variant is defined here.
multiclass VPseudoConversionNoExcept<VReg RetClass,
                                     VReg Op1Class,
                                     LMULInfo MInfo,
                                     string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA_NoExcept<RetClass, Op1Class, Constraint>;
  }
}
3364
// FP -> integer conversion pseudos, one set per FP LMUL.
multiclass VPseudoVCVTI_V {
  foreach m = MxListF in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFCvtFToIV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFCvtFToIV_" # sfx);

    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3375
// FP -> integer conversion pseudos, rounding-mode (_MASK-only) variant.
multiclass VPseudoVCVTI_RM_V {
  foreach m = MxListF in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFCvtFToIV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFCvtFToIV_" # sfx);

    defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3386
// FP rounding pseudos built on the NoExcept conversion (_MASK-only) variant.
multiclass VPseudoVFROUND_NOEXCEPT_V {
  foreach m = MxListF in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFCvtFToIV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFCvtFToIV_" # sfx);

    defm _V : VPseudoConversionNoExcept<m.vrclass, m.vrclass, m>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3397
// Integer -> FP conversion pseudos, one set per FP LMUL.
multiclass VPseudoVCVTF_V {
  foreach m = MxListF in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFCvtIToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFCvtIToFV_" # sfx);

    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3408
// Integer -> FP conversion pseudos, rounding-mode (_MASK-only) variant.
multiclass VPseudoVCVTF_RM_V {
  foreach m = MxListF in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFCvtIToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFCvtIToFV_" # sfx);

    defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3419
// Widening conversion pseudos: double-width, earlyclobber destination.
// No scheduling information is attached here.
multiclass VPseudoConversionW_V {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListW in {
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, Constr>;
  }
}
3425
// Widening FP -> integer conversion; the wide destination is earlyclobber.
multiclass VPseudoVWCVTI_V {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFWCvtFToIV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFWCvtFToIV_" # sfx);

    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3437
// Widening FP -> integer conversion, rounding-mode (_MASK-only) variant.
multiclass VPseudoVWCVTI_RM_V {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFWCvtFToIV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFWCvtFToIV_" # sfx);

    defm _V : VPseudoConversionRM<m.wvrclass, m.vrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3449
// Widening integer -> FP conversion; the wide destination is earlyclobber.
multiclass VPseudoVWCVTF_V {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFWCvtIToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFWCvtIToFV_" # sfx);

    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3461
// Widening integer -> FP conversion, rounding-mode (_MASK-only) variant.
multiclass VPseudoVWCVTF_RM_V {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFWCvtIToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFWCvtIToFV_" # sfx);

    defm _V : VPseudoConversionRM<m.wvrclass, m.vrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3473
// Widening FP -> FP conversion; the wide destination is earlyclobber.
multiclass VPseudoVWCVTD_V {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFWCvtFToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFWCvtFToFV_" # sfx);

    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3485
// Narrowing FP -> integer conversion (_W): narrow destination, wide source;
// the destination is earlyclobber against the wide source.
multiclass VPseudoVNCVTI_W {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFNCvtFToIV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFNCvtFToIV_" # sfx);

    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3497
// Narrowing FP -> integer conversion, rounding-mode (_MASK-only) variant.
multiclass VPseudoVNCVTI_RM_W {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFNCvtFToIV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFNCvtFToIV_" # sfx);

    defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3509
// Narrowing integer -> FP conversion (_W); earlyclobber narrow destination.
multiclass VPseudoVNCVTF_W {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFNCvtIToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFNCvtIToFV_" # sfx);

    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3521
// Narrowing integer -> FP conversion, rounding-mode (_MASK-only) variant.
multiclass VPseudoVNCVTF_RM_W {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFNCvtIToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFNCvtIToFV_" # sfx);

    defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3533
// Narrowing FP -> FP conversion (_W); earlyclobber narrow destination.
multiclass VPseudoVNCVTD_W {
  defvar Constr = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar sfx = m.MX;
    defvar Wr = !cast<SchedWrite>("WriteVFNCvtFToFV_" # sfx);
    defvar Rd = !cast<SchedRead>("ReadVFNCvtFToFV_" # sfx);

    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, Constr>,
              Sched<[Wr, Rd, ReadVMask]>;
  }
}
3545
// Unit-stride segment loads: for every EEW and compatible LMUL, define
// plain, tail-undisturbed (_TU) and masked (_MASK) pseudos for each legal
// segment count nf, with unit-stride segment-load scheduling attached.
multiclass VPseudoUSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo :
            VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_TU" :
            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3564
// Fault-only-first unit-stride segment loads ("FF" in the record names):
// plain, _TU and _MASK pseudos per EEW/LMUL/nf combination.
multiclass VPseudoUSSegLoadFF {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "FF_V_" # LInfo :
            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
          def nf # "E" # eew # "FF_V_" # LInfo # "_TU" :
            VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadFFMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3583
// Strided segment loads: plain, _TU and _MASK pseudos per EEW/LMUL/nf
// combination, with strided segment-load scheduling attached.
multiclass VPseudoSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>,
                                               VLSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_TU" : VPseudoSSegLoadNoMaskTU<vreg, eew, nf>,
                                                       VLSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>,
                                                         VLSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3602
// Indexed segment load pseudos. For each (index EEW, data SEW, data LMUL)
// combination, compute the index operand's EMUL = idx_eew * lmul / sew in
// octuples (8 * LMUL) and emit pseudos only when the result is a legal LMUL,
// i.e. octuple value in [1, 64] (MF8 .. M8). The Ordered bit selects the
// "O"/"U" tag used in the scheduling class name.
multiclass VPseudoISegLoad<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar IdxVreg = idx_lmul.vrclass;
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              // Plain, tail-undisturbed (_TU) and masked (_MASK) variants.
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                      nf, Ordered>,
                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_TU" :
                VPseudoISegLoadNoMaskTU<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                        nf, Ordered>,
                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                    nf, Ordered>,
                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
            }
          }
        }
      }
    }
  }
}
3639
// Unit-stride segment stores: only plain and masked (_MASK) variants are
// defined (no _TU form, unlike the segment loads above).
multiclass VPseudoUSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>,
                                               VSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>,
                                                         VSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3656
// Strided segment stores: plain and masked (_MASK) variants per
// EEW/LMUL/nf combination.
multiclass VPseudoSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>,
                                               VSSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>,
                                                         VSSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3673
// Indexed segment store pseudos. Same EMUL computation and legality check
// as VPseudoISegLoad: EMUL = idx_eew * lmul / sew in octuples, legal when
// in [1, 64] (MF8 .. M8). Stores define only plain and masked (_MASK)
// variants. The Ordered bit selects the "O"/"U" scheduling tag.
multiclass VPseudoISegStore<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar IdxVreg = idx_lmul.vrclass;
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                       nf, Ordered>,
                VSXSEGSched<nf, idx_eew, Order, ValLInfo>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                     nf, Ordered>,
                VSXSEGSched<nf, idx_eew, Order, ValLInfo>;
            }
          }
        }
      }
    }
  }
}
3706
3707//===----------------------------------------------------------------------===//
3708// Helpers to define the intrinsic patterns.
3709//===----------------------------------------------------------------------===//
3710
// Matches an unmasked unary intrinsic whose merge operand is undef onto the
// unsuffixed (tail-agnostic) pseudo; the merge operand is dropped.
class VPatUnaryNoMask<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type undef),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew)>;
3726
// Matches an unmasked unary intrinsic with a live merge operand onto the
// tail-undisturbed (_TU) pseudo; the merge operand is passed through.
class VPatUnaryNoMaskTU<string intrinsic_name,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op2_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew)>;
3744
// Matches the "_mask" intrinsic onto the _MASK pseudo; the mask is fixed
// in V0 and the merge operand is passed through.
class VPatUnaryMask<string intrinsic_name,
                    string inst,
                    string kind,
                    ValueType result_type,
                    ValueType op2_type,
                    ValueType mask_type,
                    int sew,
                    LMULInfo vlmul,
                    VReg result_reg_class,
                    VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
3764
// Matches the "_mask" intrinsic that carries an explicit policy immediate
// onto the _MASK pseudo, forwarding the policy operand.
class VPatUnaryMaskTA<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
3784
// Matches an unmasked mask-register unary intrinsic onto the "_M_" # mti.BX
// pseudo.
class VPatMaskUnaryNoMask<string intrinsic_name,
                          string inst,
                          MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                (mti.Mask VR:$rs2),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX)
                (mti.Mask VR:$rs2),
                GPR:$vl, mti.Log2SEW)>;
3794
// Matches the masked mask-register unary intrinsic ("_mask") onto the
// "_M_" # mti.BX # "_MASK" pseudo; mask in V0, merge passed through.
class VPatMaskUnaryMask<string intrinsic_name,
                        string inst,
                        MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
3807
// Like the masked unary patterns, but the mask operand may live in any
// vector register (VR:$rs2) rather than being pinned to V0.
class VPatUnaryAnyMask<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   GPR:$vl, sew)>;
3828
// Matches a binary intrinsic with no merge operand onto the pseudo named
// exactly by `inst`.
class VPatBinaryM<string intrinsic_name,
                  string inst,
                  ValueType result_type,
                  ValueType op1_type,
                  ValueType op2_type,
                  int sew,
                  VReg op1_reg_class,
                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
3845
// Matches a binary intrinsic whose merge operand is undef onto the
// unsuffixed (tail-agnostic) pseudo; the merge operand is dropped.
class VPatBinaryNoMaskTA<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         int sew,
                         VReg op1_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
3863
// Unmasked binary op with a live merge operand (tail-undisturbed form):
// selects to the "_TU" flavor of the pseudo and forwards $merge.
class VPatBinaryNoMaskTU<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         int sew,
                         VReg result_reg_class,
                         VReg op1_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TU")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
3883
// Same as above but source operands are swapped.
// The intrinsic lists ($rs2, $rs1) while the pseudo receives ($rs1, $rs2);
// used when the intrinsic's operand order differs from the instruction's.
class VPatBinaryNoMaskSwapped<string intrinsic_name,
                              string inst,
                              ValueType result_type,
                              ValueType op1_type,
                              ValueType op2_type,
                              int sew,
                              VReg op1_reg_class,
                              DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
3901
// Masked binary op (matches the intrinsic's "_mask" variant) with a merge
// operand and the V0 mask; selects to the "_MASK" pseudo. This form carries
// no policy operand (contrast VPatBinaryMaskTA below).
class VPatBinaryMask<string intrinsic_name,
                     string inst,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int sew,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
3923
// Masked binary op with an explicit policy immediate: the intrinsic's
// trailing (XLenVT timm:$policy) operand is forwarded to the "_MASK" pseudo.
class VPatBinaryMaskTA<string intrinsic_name,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       VReg result_reg_class,
                       VReg op1_reg_class,
                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
3945
// Same as above but source operands are swapped.
// Masked variant: intrinsic lists ($rs2, $rs1); the "_MASK" pseudo receives
// ($rs1, $rs2). No policy operand.
class VPatBinaryMaskSwapped<string intrinsic_name,
                            string inst,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            VReg result_reg_class,
                            VReg op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
3968
// Tied unmasked binary op: the merge is undef and the first source doubles
// as the destination, so the "_TIED" pseudo takes $rs1 only once. Tail
// policy is AGNOSTIC because the merge operand was undef.
class VPatTiedBinaryNoMask<string intrinsic_name,
                           string inst,
                           ValueType result_type,
                           ValueType op2_type,
                           int sew,
                           VReg result_reg_class,
                           DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
3985
// Tied unmasked binary op, tail-undisturbed form: matches only when the
// merge operand and the first source are the same value ($merge appears in
// both positions), so the "_TIED" pseudo can be used with the
// TAIL_UNDISTURBED_MASK_UNDISTURBED policy.
class VPatTiedBinaryNoMaskTU<string intrinsic_name,
                             string inst,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             VReg result_reg_class,
                             DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
4002
// Tied masked binary op: matches the "_mask" intrinsic only when merge and
// first source coincide, selecting to the "_MASK_TIED" pseudo and forwarding
// the explicit policy immediate.
class VPatTiedBinaryMask<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int sew,
                         VReg result_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4021
// Unmasked ternary op: $rs3 is both an input and the destination of the
// <inst>_<kind>_<MX> pseudo. No policy operand in this form.
class VPatTernaryNoMask<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew)>;
4043
// Unmasked ternary op that carries an explicit policy immediate; otherwise
// identical to VPatTernaryNoMask ($rs3 is both input and destination).
class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  int sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  RegisterClass op1_reg_class,
                                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4065
// Masked ternary op (matches the "_mask" intrinsic): $rs3 is input and
// destination, V0 is the mask; selects to <inst>_<kind>_<MX>_MASK.
// No policy operand (contrast VPatTernaryMaskPolicy).
class VPatTernaryMask<string intrinsic,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      RegisterClass op1_reg_class,
                      DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew)>;
4090
// Masked ternary op with an explicit policy immediate forwarded to the
// <inst>_<kind>_<MX>_MASK pseudo.
class VPatTernaryMaskPolicy<string intrinsic,
                            string inst,
                            string kind,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
                            VReg result_reg_class,
                            RegisterClass op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4115
// Unary op from a mask register to an XLenVT scalar: for every mask type,
// map both the plain and the "_mask" intrinsic variant onto the _M_<BX>
// pseudo (the masked variant additionally passes V0).
multiclass VPatUnaryS_M<string intrinsic_name, string inst> {
  foreach m = AllMasks in {
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
                        (m.Mask VR:$rs1), VLOpFrag)),
              (!cast<Instruction>(inst#"_M_"#m.BX)
                $rs1, GPR:$vl, m.Log2SEW)>;
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
                        (m.Mask VR:$rs1), (m.Mask V0), VLOpFrag)),
              (!cast<Instruction>(inst#"_M_"#m.BX#"_MASK")
                $rs1, (m.Mask V0), GPR:$vl, m.Log2SEW)>;
  }
}
4130
// Instantiate VPatUnaryAnyMask with the "VM" kind for each vector type in
// the list; result and source share the same type and register class.
multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach t = vtilist in
    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
                           t.Vector, t.Vector, t.Mask, t.Log2SEW, t.LMul,
                           t.RegClass, t.RegClass>;
}
4140
// Mask-register unary op: emit both the unmasked and the masked pattern for
// every mask type.
multiclass VPatUnaryM_M<string intrinsic, string inst> {
  foreach m = AllMasks in {
    def : VPatMaskUnaryNoMask<intrinsic, inst, m>;
    def : VPatMaskUnaryMask<intrinsic, inst, m>;
  }
}
4149
// Unary op whose single source is a mask register (kind "M"): TA, TU and
// masked-TA variants for all integer vector result types.
multiclass VPatUnaryV_M<string intrinsic, string instruction> {
  foreach t = AllIntegerVectors in {
    def : VPatUnaryNoMask<intrinsic, instruction, "M", t.Vector, t.Mask,
                          t.Log2SEW, t.LMul, VR>;
    def : VPatUnaryNoMaskTU<intrinsic, instruction, "M", t.Vector, t.Mask,
                            t.Log2SEW, t.LMul, t.RegClass, VR>;
    def : VPatUnaryMaskTA<intrinsic, instruction, "M", t.Vector, t.Mask,
                          t.Mask, t.Log2SEW, t.LMul, t.RegClass, VR>;
  }
}
4161
// Unary op whose source is the fractional-LMUL partner of the result type:
// result uses vti, source uses fti. TA, TU and masked-TA variants.
multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
                         list<VTypeInfoToFraction> fractionList> {
  foreach pair = fractionList in {
    defvar vti = pair.Vti;
    defvar fti = pair.Fti;
    def : VPatUnaryNoMask<intrinsic, instruction, suffix, vti.Vector,
                          fti.Vector, vti.Log2SEW, vti.LMul, fti.RegClass>;
    def : VPatUnaryNoMaskTU<intrinsic, instruction, suffix, vti.Vector,
                            fti.Vector, vti.Log2SEW, vti.LMul, vti.RegClass,
                            fti.RegClass>;
    def : VPatUnaryMaskTA<intrinsic, instruction, suffix, vti.Vector,
                          fti.Vector, vti.Mask, vti.Log2SEW, vti.LMul,
                          vti.RegClass, fti.RegClass>;
  }
}
4180
// Plain vector-to-vector unary op (kind "V"): TA, TU and masked-TA variants
// for each vector type in the list.
multiclass VPatUnaryV_V<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist> {
  foreach t = vtilist in {
    def : VPatUnaryNoMask<intrinsic, instruction, "V", t.Vector, t.Vector,
                          t.Log2SEW, t.LMul, t.RegClass>;
    def : VPatUnaryNoMaskTU<intrinsic, instruction, "V", t.Vector, t.Vector,
                            t.Log2SEW, t.LMul, t.RegClass, t.RegClass>;
    def : VPatUnaryMaskTA<intrinsic, instruction, "V", t.Vector, t.Vector,
                          t.Mask, t.Log2SEW, t.LMul, t.RegClass, t.RegClass>;
  }
}
4195
// Nullary vector producer (only VL and optional merge/mask inputs), emitted
// for all integer vector types in three flavors: undef merge -> base pseudo,
// live merge -> "_TU" pseudo, masked-with-policy -> "_MASK" pseudo.
multiclass VPatNullaryV<string intrinsic, string instruction>
{
  foreach vti = AllIntegerVectors in {
    // Undef merge: tail-agnostic, no extra operands.
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                          (vti.Vector undef),
                          VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                          GPR:$vl, vti.Log2SEW)>;
    // Live merge: tail-undisturbed "_TU" pseudo keeps $merge.
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                          (vti.Vector vti.RegClass:$merge),
                          VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_TU")
                          vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>;
    // Masked "_mask" intrinsic: forwards V0 and the policy immediate.
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                          (vti.Vector vti.RegClass:$merge),
                          (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                          vti.RegClass:$merge, (vti.Mask V0),
                          GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  }
}
4217
// Nullary mask producer: the intrinsic takes only a VL operand and maps to
// the _M_<BX> pseudo for each mask type.
multiclass VPatNullaryM<string intrinsic, string inst> {
  foreach m = AllMasks in {
    def : Pat<(m.Mask (!cast<Intrinsic>(intrinsic) VLOpFrag)),
              (!cast<Instruction>(inst#"_M_"#m.BX) GPR:$vl, m.Log2SEW)>;
  }
}
4225
// Bundles the unmasked (VPatBinaryM) and masked (VPatBinaryMask) pattern
// classes for one mask-producing binary pseudo. Arguments are forwarded
// positionally, so the parameter order here must track those classes.
multiclass VPatBinaryM<string intrinsic,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      VReg result_reg_class,
                      VReg op1_reg_class,
                      DAGOperand op2_kind>
{
  def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type,
                    sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
                       mask_type, sew, result_reg_class, op1_reg_class,
                       op2_kind>;
}
4243
// Bundles the three pattern flavors of a tail-policy-aware binary op:
// unmasked tail-agnostic (undef merge), unmasked tail-undisturbed ("_TU"),
// and masked with policy ("_MASK"). Arguments are forwarded positionally.
multiclass VPatBinaryTA<string intrinsic,
                        string inst,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int sew,
                        VReg result_reg_class,
                        VReg op1_reg_class,
                        DAGOperand op2_kind>
{
  def : VPatBinaryNoMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                           sew, op1_reg_class, op2_kind>;
  def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
                           sew, result_reg_class, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                         mask_type, sew, result_reg_class, op1_reg_class,
                         op2_kind>;
}
4263
// Bundles the unmasked and masked swapped-operand pattern classes (intrinsic
// operand order is ($rs2, $rs1), pseudo order is ($rs1, $rs2)).
multiclass VPatBinarySwapped<string intrinsic,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      VReg result_reg_class,
                      VReg op1_reg_class,
                      DAGOperand op2_kind>
{
  def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                                sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                              mask_type, sew, result_reg_class, op1_reg_class,
                              op2_kind>;
}
4281
// Binary op with a carry-in operand in V0 (the V0 operand is data, not a
// control mask: the selected pseudo has no "_MASK" suffix). Two flavors:
// undef merge -> base pseudo; live merge -> "_TU" pseudo keeping $merge.
multiclass VPatBinaryCarryInTAIL<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 VReg op1_reg_class,
                                 DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (result_type undef),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
4317
// Binary op with a carry-in operand in V0 and no merge operand; single
// pattern onto the <inst>_<kind>_<MX> pseudo.
multiclass VPatBinaryCarryIn<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
4340
// Binary op producing a mask result, with neither a merge operand nor a
// V0 input; single pattern onto the <inst>_<kind>_<MX> pseudo.
multiclass VPatBinaryMaskOut<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         GPR:$vl, sew)>;
}
4361
// Conversion-style unary op: bundles the unmasked TA, unmasked TU, and
// masked-TA unary pattern classes. Arguments are forwarded positionally.
multiclass VPatConversionTA<string intrinsic,
                            string inst,
                            string kind,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
                            VReg result_reg_class,
                            VReg op1_reg_class>
{
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                        sew, vlmul, op1_reg_class>;
  def : VPatUnaryNoMaskTU<intrinsic, inst, kind, result_type, op1_type,
                          sew, vlmul, result_reg_class, op1_reg_class>;
  def : VPatUnaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
}
4380
// Vector-vector binary op: one VPatBinaryTA bundle per vector type, using
// the "_VV_<MX>" pseudo name.
multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach t = vtilist in {
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # t.LMul.MX,
                        t.Vector, t.Vector, t.Vector, t.Mask, t.Log2SEW,
                        t.RegClass, t.RegClass, t.RegClass>;
  }
}
4389
// Vector-vector binary op whose second source uses the integer counterpart
// of the element type (via GetIntVTypeInfo), same LMUL/SEW.
multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach t = vtilist in {
    defvar it = GetIntVTypeInfo<t>.Vti;
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # t.LMul.MX,
                        t.Vector, t.Vector, it.Vector, t.Mask, t.Log2SEW,
                        t.RegClass, t.RegClass, t.RegClass>;
  }
}
4400
// Vector-vector binary op where the second source has a fixed element width
// 'eew' (e.g. an index operand) and therefore its own EMUL.
multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
                                  int eew, list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // emul = lmul * eew / sew
    // Computed in eighths (octuple = 8*LMUL) so fractional LMULs stay
    // exact in integer arithmetic.
    defvar vlmul = vti.LMul;
    defvar octuple_lmul = vlmul.octuple;
    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
    // Only legal EMULs: 1/8 (octuple 1) through 8 (octuple 64).
    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
      defvar emul_str = octuple_to_str<octuple_emul>.ret;
      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
      defm : VPatBinaryTA<intrinsic, inst,
                          vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                          vti.Log2SEW, vti.RegClass,
                          vti.RegClass, ivti.RegClass>;
    }
  }
}
4419
// Vector-scalar binary op: the kind string is "V" plus the type's scalar
// suffix (so the pseudo is e.g. _VX_ or _VF_ depending on the type).
multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach t = vtilist in {
    defvar suffix = "V"#t.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#suffix#"_"#t.LMul.MX,
                        t.Vector, t.Vector, t.Scalar, t.Mask, t.Log2SEW,
                        t.RegClass, t.RegClass, t.ScalarRegClass>;
  }
}
4430
// Vector-scalar binary op whose scalar is always an XLenVT GPR, regardless
// of the vector element type.
multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach t = vtilist in {
    defm : VPatBinaryTA<intrinsic, instruction # "_VX_" # t.LMul.MX,
                        t.Vector, t.Vector, XLenVT, t.Mask, t.Log2SEW,
                        t.RegClass, t.RegClass, GPR>;
  }
}
4439
// Vector-immediate binary op: the caller chooses the immediate operand kind
// (e.g. signed vs. unsigned 5-bit) via imm_type.
multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, Operand imm_type> {
  foreach t = vtilist in {
    defm : VPatBinaryTA<intrinsic, instruction # "_VI_" # t.LMul.MX,
                        t.Vector, t.Vector, XLenVT, t.Mask, t.Log2SEW,
                        t.RegClass, t.RegClass, imm_type>;
  }
}
4448
// Mask-mask binary op (mask logical operations): one unmasked pattern per
// mask type onto the "_MM_<MX>" pseudo.
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
  foreach m = AllMasks in {
    def : VPatBinaryM<intrinsic, instruction # "_MM_" # m.LMul.MX,
                      m.Mask, m.Mask, m.Mask, m.Log2SEW, VR, VR>;
  }
}
4455
// Widening vector-vector binary op: narrow x narrow -> wide. The pseudo is
// named after the narrow type's LMUL, and SEW is the narrow SEW.
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach pair = vtilist in {
    defvar narrow = pair.Vti;
    defvar wide = pair.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # narrow.LMul.MX,
                        wide.Vector, narrow.Vector, narrow.Vector,
                        narrow.Mask, narrow.Log2SEW, wide.RegClass,
                        narrow.RegClass, narrow.RegClass>;
  }
}
4467
// Widening vector-scalar binary op: narrow vector x narrow scalar -> wide
// vector. Kind is "V" plus the narrow type's scalar suffix.
multiclass VPatBinaryW_VX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach pair = vtilist in {
    defvar narrow = pair.Vti;
    defvar wide = pair.Wti;
    defvar suffix = "V"#narrow.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#suffix#"_"#narrow.LMul.MX,
                        wide.Vector, narrow.Vector, narrow.Scalar,
                        narrow.Mask, narrow.Log2SEW, wide.RegClass,
                        narrow.RegClass, narrow.ScalarRegClass>;
  }
}
4480
// Widening wide-vector x narrow-vector op: wide x narrow -> wide. Because
// the result type equals the first source type, tied "_TIED" pseudos are
// emitted alongside the untied ones; AddedComplexity = 1 makes the tied
// TU/masked patterns win over the untied ones when both could match.
multiclass VPatBinaryW_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    // Undef merge, first source tied to the destination.
    def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Vti.Vector,
                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    // Untied TU fallback (merge differs from the first source).
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                             Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
                             Wti.RegClass, Wti.RegClass, Vti.RegClass>;
    let AddedComplexity = 1 in {
    // Preferred when merge == first source: use the tied pseudos.
    def : VPatTiedBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                 Wti.Vector, Vti.Vector,
                                 Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                             Wti.Vector, Vti.Vector, Vti.Mask,
                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
    }
    // Untied masked fallback.
    def : VPatBinaryMaskTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                           Vti.Log2SEW, Wti.RegClass,
                           Wti.RegClass, Vti.RegClass>;
  }
}
4506
// Widening wide-vector x narrow-scalar op: wide x narrow scalar -> wide.
// Kind is "W" plus the narrow type's scalar suffix.
multiclass VPatBinaryW_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach pair = vtilist in {
    defvar narrow = pair.Vti;
    defvar wide = pair.Wti;
    defvar suffix = "W"#narrow.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#suffix#"_"#narrow.LMul.MX,
                        wide.Vector, wide.Vector, narrow.Scalar, narrow.Mask,
                        narrow.Log2SEW, wide.RegClass, wide.RegClass,
                        narrow.ScalarRegClass>;
  }
}
4519
// Patterns for narrowing ".wv" forms: a narrow-type (vti) result computed from
// a wide-type (wti) first source and a narrow-type second source.
multiclass VPatBinaryV_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar inst = instruction # "_WV_" # vti.LMul.MX;
    defm : VPatBinaryTA<intrinsic, inst,
                        vti.Vector, wti.Vector, vti.Vector, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        wti.RegClass, vti.RegClass>;
  }
}
4531
// Patterns for narrowing ".wx"/".wf" forms: narrow result, wide first source,
// narrow scalar second source.
multiclass VPatBinaryV_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "W"#Vti.ScalarSuffix;
    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, Vti.ScalarRegClass>;
  }
}

// Patterns for narrowing ".wi" forms: narrow result, wide first source, and a
// 5-bit unsigned immediate second source.
multiclass VPatBinaryV_WI<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinaryTA<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
                        Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Vti.RegClass,
                        Wti.RegClass, uimm5>;
  }
}
4556
// Carry-in pattern helpers ("VVM"/"VXM"/"VIM" operand kinds). When CarryOut
// is set the produced result type is the mask type instead of the vector
// type, i.e. the instruction produces carry/borrow-out bits.
multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Vector-scalar carry-in variant; the scalar suffix depends on the element
// type (X for integer, F-variants for float).
multiclass VPatBinaryV_XM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction,
                             "V"#vti.ScalarSuffix#"M",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Scalar, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.ScalarRegClass>;
}

// Vector-immediate (simm5) carry-in variant; integer vectors only.
multiclass VPatBinaryV_IM<string intrinsic, string instruction,
                          bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, XLenVT, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
4589
// Tail-policy-aware versions of the carry-in helpers above. These use
// VPatBinaryCarryInTAIL, which takes an additional merge register class
// (the extra vti.RegClass argument) for the tail-undisturbed operand.
multiclass VPatBinaryV_VM_TAIL<string intrinsic, string instruction,
                               bit CarryOut = 0,
                               list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VVM",
                                 !if(CarryOut, vti.Mask, vti.Vector),
                                 vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}

// Vector-scalar tail-aware carry-in variant.
multiclass VPatBinaryV_XM_TAIL<string intrinsic, string instruction,
                               bit CarryOut = 0,
                               list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix#"M",
                                 !if(CarryOut, vti.Mask, vti.Vector),
                                 vti.Vector, vti.Scalar, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.ScalarRegClass>;
}

// Vector-immediate tail-aware carry-in variant; integer vectors only.
multiclass VPatBinaryV_IM_TAIL<string intrinsic, string instruction,
                               bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VIM",
                                 !if(CarryOut, vti.Mask, vti.Vector),
                                 vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul,
                                 vti.RegClass, vti.RegClass, simm5>;
}
4622
// Mask-producing binary helpers (VPatBinaryMaskOut): the result type is the
// mask type and there is no mask/merge input operand.
multiclass VPatBinaryV_V<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
                             vti.Mask, vti.Vector, vti.Vector,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Vector-scalar (GPR) mask-producing variant.
multiclass VPatBinaryV_X<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, GPR>;
}

// Vector-immediate (simm5) mask-producing variant.
multiclass VPatBinaryV_I<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
4646
// Mask-result comparison helpers (VPatBinaryM). The result register class is
// always VR since a mask result occupies a single vector register regardless
// of the source LMUL.
multiclass VPatBinaryM_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryM<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.RegClass>;
}

// As above but uses VPatBinarySwapped, which matches the intrinsic with its
// two vector operands in the opposite order.
multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, VR,
                             vti.RegClass, vti.RegClass>;
}

// Vector-scalar mask-result comparison; scalar suffix picks X or F form.
multiclass VPatBinaryM_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinaryM<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.ScalarRegClass>;
  }
}

// Vector-immediate (simm5) mask-result comparison.
multiclass VPatBinaryM_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinaryM<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, XLenVT, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, simm5>;
}
4684
// Convenience multiclasses that bundle the per-operand-kind pattern helpers
// (VV / VX / VI, WV / WX / WI, VM / XM / IM) for one intrinsic so callers can
// instantiate every supported operand combination with a single defm.
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;

multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
      VPatBinaryW_WX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI<intrinsic, instruction, vtilist>;

// Carry-in ops with a vector result use the tail-policy-aware helpers.
multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>,
      VPatBinaryV_IM_TAIL<intrinsic, instruction>;

// Carry-in ops producing a mask (carry-out) result.
multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>;

// Mask-producing ops with no carry-in operand.
multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>,
      VPatBinaryV_I<intrinsic, instruction>;

multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>;

multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;

multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>;
4743
// Base ternary helper: emits one unmasked (NoMask) and one masked pattern,
// neither of which carries a tail-policy operand.
multiclass VPatTernary<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       RegisterClass op1_reg_class,
                       DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                          sew, vlmul, result_reg_class, op1_reg_class,
                          op2_kind>;
  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                        op2_kind>;
}
4763
// Ternary helper whose unmasked pattern takes no policy operand but whose
// masked pattern does (VPatTernaryMaskPolicy).
multiclass VPatTernaryNoMaskNoPolicy<string intrinsic,
                                     string inst,
                                     string kind,
                                     ValueType result_type,
                                     ValueType op1_type,
                                     ValueType op2_type,
                                     ValueType mask_type,
                                     int sew,
                                     LMULInfo vlmul,
                                     VReg result_reg_class,
                                     RegisterClass op1_reg_class,
                                     DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                          sew, vlmul, result_reg_class, op1_reg_class,
                          op2_kind>;
  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                              op2_kind>;
}
4783
// Ternary helper where both the unmasked and the masked patterns carry a
// tail/mask policy operand.
multiclass VPatTernaryWithPolicy<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 RegisterClass op1_reg_class,
                                 DAGOperand op2_kind> {
  def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
                                    op2_type, sew, vlmul, result_reg_class,
                                    op1_reg_class, op2_kind>;
  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                              op2_kind>;
}
4803
// Multiply-add style ternary patterns ("AAXA" operand shape: destination is
// also a source accumulator); all types share the element width/LMUL.
multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}

// Ternary patterns with a GPR second operand (e.g. for slides/gathers).
multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, GPR>;
}

// Multiply-add style ternary with a scalar first operand; the suffix (X/F)
// follows the element type's scalar register class.
multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.ScalarRegClass, vti.RegClass>;
}

// Ternary patterns with an immediate second operand of the given Imm_type.
multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand Imm_type> {
  foreach vti = vtilist in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, Imm_type>;
}
4840
// Widening multiply-add ternary patterns: wide-type accumulator/result,
// narrow-type multiplicand sources.
multiclass VPatTernaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 wti.Vector, vti.Vector, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}

// Widening ternary with a narrow scalar first operand (X/F suffix).
multiclass VPatTernaryW_VX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 wti.Vector, vti.Scalar, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
  }
}
4865
// Convenience bundles for the ternary and mask-result binary helpers above.
multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
    : VPatTernaryV_VV_AAXA<intrinsic, instruction, vtilist>,
      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;

multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist, Operand Imm_type = simm5>
    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;


multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist>
    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
      VPatTernaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

// "_INT" variant: the vv and vx forms use distinct intrinsics (the base name
// suffixed with "_vv"/"_vx"); the immediate form reuses the "_vx" intrinsic.
multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
4903
// Reduction patterns: the scalar accumulator/result always lives in an
// M1-sized (single-register, VR) vector type, while the reduced source can be
// any LMUL. No-group (LMUL <= 1) types look up their own SEW's M1 type by
// name; group (LMUL > 1) types carry a precomputed VectorM1 field.
multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in
  {
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    defm : VPatTernary<intrinsic, instruction, "VS",
                       vectorM1.Vector, vti.Vector,
                       vectorM1.Vector, vti.Mask,
                       vti.Log2SEW, vti.LMul,
                       VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
  {
    defm : VPatTernary<intrinsic, instruction, "VS",
                       gvti.VectorM1, gvti.Vector,
                       gvti.VectorM1, gvti.Mask,
                       gvti.Log2SEW, gvti.LMul,
                       VR, gvti.RegClass, VR>;
  }
}
4923
// Widening reduction patterns: the accumulator/result element width is twice
// the source SEW. Types whose doubled SEW would exceed 64 bits are skipped
// since no such M1 vector type exists.
multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in
  {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      defm : VPatTernary<intrinsic, instruction, "VS",
                         wtiM1.Vector, vti.Vector,
                         wtiM1.Vector, vti.Mask,
                         vti.Log2SEW, vti.LMul,
                         wtiM1.RegClass, vti.RegClass,
                         wtiM1.RegClass>;
    }
  }
}
4939
// Conversion pattern helpers. Naming: the first letter pair is the result
// type, the second is the source type (V = same width, W = wide; I = integer,
// F = float). "V"-kind conversions keep the width; "W"-kind narrow from the
// wide type.

// Float vector -> same-width integer vector.
multiclass VPatConversionVI_VF<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                            fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}

// Integer vector -> same-width float vector.
multiclass VPatConversionVF_VI<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
                            ivti.LMul, fvti.RegClass, ivti.RegClass>;
  }
}

// Float vector -> integer vector of twice the element width (widening).
multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                            fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}

// Integer vector -> float vector of twice the element width (widening).
multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in
  {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
                            vti.LMul, fwti.RegClass, vti.RegClass>;
  }
}

// Float vector -> float vector of twice the element width (widening).
multiclass VPatConversionWF_VF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
                            fvti.LMul, fwti.RegClass, fvti.RegClass>;
  }
}

// Wide float vector -> integer vector of half the element width (narrowing).
multiclass VPatConversionVI_WF <string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in
  {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "W",
                            vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                            vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}

// Wide integer vector -> float vector of half the element width (narrowing).
multiclass VPatConversionVF_WI <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversionTA<intrinsic, instruction, "W",
                            fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
                            fvti.LMul, fvti.RegClass, iwti.RegClass>;
  }
}

// Wide float vector -> float vector of half the element width (narrowing).
multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversionTA<intrinsic, instruction, "W",
                            fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                            fvti.LMul, fvti.RegClass, fwti.RegClass>;
  }
}
5037
// Matches a compare intrinsic onto the _VI pseudo of `inst` while decrementing
// the immediate (DecImm). Presumably used to implement a predicate that has no
// direct immediate encoding via an adjacent one (imm -> imm-1); the caller's
// ImmType must guarantee imm-1 still fits the pseudo's immediate field —
// TODO confirm at the instantiation sites.
multiclass VPatCompare_VI<string intrinsic, string inst,
                          ImmLeaf ImmType> {
  foreach vti = AllIntegerVectors in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
    // Unmasked form.
    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
                              (vti.Scalar ImmType:$rs2),
                              VLOpFrag)),
              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                      GPR:$vl, vti.Log2SEW)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
    // Masked form with merge operand and V0 mask.
    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
                                  (vti.Vector vti.RegClass:$rs1),
                                  (vti.Scalar ImmType:$rs2),
                                  (vti.Mask V0),
                                  VLOpFrag)),
              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                          (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}
5059
5060//===----------------------------------------------------------------------===//
5061// Pseudo instructions
5062//===----------------------------------------------------------------------===//
5063
5064let Predicates = [HasVInstructions] in {
5065
5066//===----------------------------------------------------------------------===//
5067// Pseudo Instructions for CodeGen
5068//===----------------------------------------------------------------------===//
5069
// Materializes VLENB (vector register length in bytes) into a GPR via the
// riscv_read_vlenb SelectionDAG node.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
                               [(set GPR:$rd, (riscv_read_vlenb))]>,
                        Sched<[WriteRdVLENB]>;
}

// Reads the current VL into a GPR (no ISel pattern; presumably inserted and
// expanded by target passes — confirm in the RISC-V backend C++ code).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
    Uses = [VL] in
def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;

// Frame-index spill/reload pseudos for segment (NF > 1 grouped) vector
// registers, one per (LMUL, NF) combination. Size = 4 * (2*nf - 1) bytes is
// the assumed worst-case expansion size — NOTE(review): the expansion itself
// is not visible here; verify against the expander pass.
foreach lmul = MxList in {
  foreach nf = NFSet<lmul>.L in {
    defvar vreg = SegRegClass<lmul, nf>.RC;
    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVSPILL" # nf # "_" # lmul.MX :
        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2), []>;
    }
    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2), []>;
    }
  }
}
5095
5096//===----------------------------------------------------------------------===//
5097// 6. Configuration-Setting Instructions
5098//===----------------------------------------------------------------------===//
5099
5100// Pseudos.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
// when we aren't using one of the special X0 encodings. Otherwise it could
// accidentally be made X0 by MachineIR optimizations. To satisfy the
// verifier, we also need a GPRX0 variant (PseudoVSETVLIX0) for the special
// encodings (rs1=X0: keep or maximize VL, per the table at the top of this
// file). PseudoVSETIVLI takes the AVL as a 5-bit immediate instead.
def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>,
                    Sched<[WriteVSETVLI, ReadVSETVLI]>;
def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>,
                      Sched<[WriteVSETVLI, ReadVSETVLI]>;
def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>,
                     Sched<[WriteVSETIVLI]>;
}
5113
5114//===----------------------------------------------------------------------===//
5115// 7. Vector Loads and Stores
5116//===----------------------------------------------------------------------===//
5117
5118//===----------------------------------------------------------------------===//
5119// 7.4 Vector Unit-Stride Instructions
5120//===----------------------------------------------------------------------===//
5121
5122// Pseudos Unit-Stride Loads and Stores
// Pseudos Unit-Stride Loads and Stores. Each multiclass expands to one pseudo
// per supported EEW/LMUL combination.
defm PseudoVL : VPseudoUSLoad;
defm PseudoVS : VPseudoUSStore;

// Mask (vlm/vsm style) load/store pseudos.
defm PseudoVLM : VPseudoLoadMask;
defm PseudoVSM : VPseudoStoreMask;

//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//

// Vector Strided Loads and Stores
defm PseudoVLS : VPseudoSLoad;
defm PseudoVSS : VPseudoSStore;

//===----------------------------------------------------------------------===//
// 7.6 Vector Indexed Instructions
//===----------------------------------------------------------------------===//

// Vector Indexed Loads and Stores; the ordered/unordered variants share one
// multiclass parameterized by Ordered.
defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>;
defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>;
defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>;
defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>;

//===----------------------------------------------------------------------===//
// 7.7. Unit-stride Fault-Only-First Loads
//===----------------------------------------------------------------------===//

// vleff may update VL register
let hasSideEffects = 1, Defs = [VL] in
defm PseudoVL : VPseudoFFLoad;

//===----------------------------------------------------------------------===//
// 7.8. Vector Load/Store Segment Instructions
//===----------------------------------------------------------------------===//
defm PseudoVLSEG : VPseudoUSSegLoad;
defm PseudoVLSSEG : VPseudoSSegLoad;
defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>;
defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>;
defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;

// vlseg<nf>e<eew>ff.v may update VL register
let hasSideEffects = 1, Defs = [VL] in {
defm PseudoVLSEG : VPseudoUSSegLoadFF;
}
5171
5172//===----------------------------------------------------------------------===//
5173// 11. Vector Integer Arithmetic Instructions
5174//===----------------------------------------------------------------------===//
5175
5176//===----------------------------------------------------------------------===//
5177// 11.1. Vector Single-Width Integer Add and Subtract
5178//===----------------------------------------------------------------------===//
// vrsub has no vv form in hardware; the vector-vector case is handled by the
// vrsub->vsub patterns that follow.
defm PseudoVADD   : VPseudoVALU_VV_VX_VI;
defm PseudoVSUB   : VPseudoVALU_VV_VX;
defm PseudoVRSUB  : VPseudoVALU_VX_VI;
5182
5183foreach vti = AllIntegerVectors in {
5184  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
5185  // Occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
5186  // to use a more complex splat sequence. Add the pattern for all VTs for
5187  // consistency.
5188  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector (undef)),
5189                                         (vti.Vector vti.RegClass:$rs2),
5190                                         (vti.Vector vti.RegClass:$rs1),
5191                                         VLOpFrag)),
5192            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
5193                                                              vti.RegClass:$rs2,
5194                                                              GPR:$vl,
5195                                                              vti.Log2SEW)>;
5196  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
5197                                         (vti.Vector vti.RegClass:$rs2),
5198                                         (vti.Vector vti.RegClass:$rs1),
5199                                         VLOpFrag)),
5200            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_TU")
5201                                                      vti.RegClass:$merge,
5202                                                      vti.RegClass:$rs1,
5203                                                      vti.RegClass:$rs2,
5204                                                      GPR:$vl,
5205                                                      vti.Log2SEW)>;
5206  def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
5207                                              (vti.Vector vti.RegClass:$rs2),
5208                                              (vti.Vector vti.RegClass:$rs1),
5209                                              (vti.Mask V0),
5210                                              VLOpFrag,
5211                                              (XLenVT timm:$policy))),
5212            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
5213                                                      vti.RegClass:$merge,
5214                                                      vti.RegClass:$rs1,
5215                                                      vti.RegClass:$rs2,
5216                                                      (vti.Mask V0),
5217                                                      GPR:$vl,
5218                                                      vti.Log2SEW,
5219                                                      (XLenVT timm:$policy))>;
5220
5221  // Match VSUB with a small immediate to vadd.vi by negating the immediate.
5222  def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
5223                                        (vti.Vector vti.RegClass:$rs1),
5224                                        (vti.Scalar simm5_plus1:$rs2),
5225                                        VLOpFrag)),
5226            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
5227                                                              (NegImm simm5_plus1:$rs2),
5228                                                              GPR:$vl,
5229                                                              vti.Log2SEW)>;
5230  def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
5231                                             (vti.Vector vti.RegClass:$rs1),
5232                                             (vti.Scalar simm5_plus1:$rs2),
5233                                             (vti.Mask V0),
5234                                             VLOpFrag,
5235                                             (XLenVT timm:$policy))),
5236            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
5237                                                      vti.RegClass:$merge,
5238                                                      vti.RegClass:$rs1,
5239                                                      (NegImm simm5_plus1:$rs2),
5240                                                      (vti.Mask V0),
5241                                                      GPR:$vl,
5242                                                      vti.Log2SEW,
5243                                                      (XLenVT timm:$policy))>;
5244}
5245
5246//===----------------------------------------------------------------------===//
5247// 11.2. Vector Widening Integer Add/Subtract
5248//===----------------------------------------------------------------------===//
5249defm PseudoVWADDU : VPseudoVWALU_VV_VX;
5250defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
5251defm PseudoVWADD  : VPseudoVWALU_VV_VX;
5252defm PseudoVWSUB  : VPseudoVWALU_VV_VX;
5253defm PseudoVWADDU : VPseudoVWALU_WV_WX;
5254defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
5255defm PseudoVWADD  : VPseudoVWALU_WV_WX;
5256defm PseudoVWSUB  : VPseudoVWALU_WV_WX;
5257
5258//===----------------------------------------------------------------------===//
5259// 11.3. Vector Integer Extension
5260//===----------------------------------------------------------------------===//
5261defm PseudoVZEXT_VF2 : PseudoVEXT_VF2;
5262defm PseudoVZEXT_VF4 : PseudoVEXT_VF4;
5263defm PseudoVZEXT_VF8 : PseudoVEXT_VF8;
5264defm PseudoVSEXT_VF2 : PseudoVEXT_VF2;
5265defm PseudoVSEXT_VF4 : PseudoVEXT_VF4;
5266defm PseudoVSEXT_VF8 : PseudoVEXT_VF8;
5267
5268//===----------------------------------------------------------------------===//
5269// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
5270//===----------------------------------------------------------------------===//
5271defm PseudoVADC  : VPseudoVCALU_VM_XM_IM;
5272defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd">;
5273defm PseudoVMADC : VPseudoVCALUM_V_X_I<"@earlyclobber $rd">;
5274
5275defm PseudoVSBC  : VPseudoVCALU_VM_XM;
5276defm PseudoVMSBC : VPseudoVCALUM_VM_XM<"@earlyclobber $rd">;
5277defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd">;
5278
5279//===----------------------------------------------------------------------===//
5280// 11.5. Vector Bitwise Logical Instructions
5281//===----------------------------------------------------------------------===//
5282defm PseudoVAND : VPseudoVALU_VV_VX_VI;
5283defm PseudoVOR  : VPseudoVALU_VV_VX_VI;
5284defm PseudoVXOR : VPseudoVALU_VV_VX_VI;
5285
5286//===----------------------------------------------------------------------===//
5287// 11.6. Vector Single-Width Bit Shift Instructions
5288//===----------------------------------------------------------------------===//
5289defm PseudoVSLL : VPseudoVSHT_VV_VX_VI<uimm5>;
5290defm PseudoVSRL : VPseudoVSHT_VV_VX_VI<uimm5>;
5291defm PseudoVSRA : VPseudoVSHT_VV_VX_VI<uimm5>;
5292
5293//===----------------------------------------------------------------------===//
5294// 11.7. Vector Narrowing Integer Right Shift Instructions
5295//===----------------------------------------------------------------------===//
5296defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI;
5297defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;
5298
5299//===----------------------------------------------------------------------===//
5300// 11.8. Vector Integer Comparison Instructions
5301//===----------------------------------------------------------------------===//
5302defm PseudoVMSEQ  : VPseudoVCMPM_VV_VX_VI;
5303defm PseudoVMSNE  : VPseudoVCMPM_VV_VX_VI;
5304defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
5305defm PseudoVMSLT  : VPseudoVCMPM_VV_VX;
5306defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
5307defm PseudoVMSLE  : VPseudoVCMPM_VV_VX_VI;
5308defm PseudoVMSGTU : VPseudoVCMPM_VX_VI;
5309defm PseudoVMSGT  : VPseudoVCMPM_VX_VI;
5310
5311//===----------------------------------------------------------------------===//
5312// 11.9. Vector Integer Min/Max Instructions
5313//===----------------------------------------------------------------------===//
5314defm PseudoVMINU : VPseudoVMINMAX_VV_VX;
5315defm PseudoVMIN  : VPseudoVMINMAX_VV_VX;
5316defm PseudoVMAXU : VPseudoVMINMAX_VV_VX;
5317defm PseudoVMAX  : VPseudoVMINMAX_VV_VX;
5318
5319//===----------------------------------------------------------------------===//
5320// 11.10. Vector Single-Width Integer Multiply Instructions
5321//===----------------------------------------------------------------------===//
5322defm PseudoVMUL    : VPseudoVMUL_VV_VX;
5323defm PseudoVMULH   : VPseudoVMUL_VV_VX;
5324defm PseudoVMULHU  : VPseudoVMUL_VV_VX;
5325defm PseudoVMULHSU : VPseudoVMUL_VV_VX;
5326
5327//===----------------------------------------------------------------------===//
5328// 11.11. Vector Integer Divide Instructions
5329//===----------------------------------------------------------------------===//
5330defm PseudoVDIVU : VPseudoVDIV_VV_VX;
5331defm PseudoVDIV  : VPseudoVDIV_VV_VX;
5332defm PseudoVREMU : VPseudoVDIV_VV_VX;
5333defm PseudoVREM  : VPseudoVDIV_VV_VX;
5334
5335//===----------------------------------------------------------------------===//
5336// 11.12. Vector Widening Integer Multiply Instructions
5337//===----------------------------------------------------------------------===//
5338defm PseudoVWMUL   : VPseudoVWMUL_VV_VX;
5339defm PseudoVWMULU  : VPseudoVWMUL_VV_VX;
5340defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;
5341
5342//===----------------------------------------------------------------------===//
5343// 11.13. Vector Single-Width Integer Multiply-Add Instructions
5344//===----------------------------------------------------------------------===//
5345defm PseudoVMACC  : VPseudoVMAC_VV_VX_AAXA;
5346defm PseudoVNMSAC : VPseudoVMAC_VV_VX_AAXA;
5347defm PseudoVMADD  : VPseudoVMAC_VV_VX_AAXA;
5348defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;
5349
5350//===----------------------------------------------------------------------===//
5351// 11.14. Vector Widening Integer Multiply-Add Instructions
5352//===----------------------------------------------------------------------===//
5353defm PseudoVWMACCU  : VPseudoVWMAC_VV_VX;
5354defm PseudoVWMACC   : VPseudoVWMAC_VV_VX;
5355defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
5356defm PseudoVWMACCUS : VPseudoVWMAC_VX;
5357
5358//===----------------------------------------------------------------------===//
5359// 11.15. Vector Integer Merge Instructions
5360//===----------------------------------------------------------------------===//
5361defm PseudoVMERGE : VPseudoVMRG_VM_XM_IM;
5362
5363//===----------------------------------------------------------------------===//
5364// 11.16. Vector Integer Move Instructions
5365//===----------------------------------------------------------------------===//
5366defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;
5367
5368//===----------------------------------------------------------------------===//
5369// 12. Vector Fixed-Point Arithmetic Instructions
5370//===----------------------------------------------------------------------===//
5371
5372//===----------------------------------------------------------------------===//
5373// 12.1. Vector Single-Width Saturating Add and Subtract
5374//===----------------------------------------------------------------------===//
5375let Defs = [VXSAT], hasSideEffects = 1 in {
5376  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI;
5377  defm PseudoVSADD  : VPseudoVSALU_VV_VX_VI;
5378  defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
5379  defm PseudoVSSUB  : VPseudoVSALU_VV_VX;
5380}
5381
5382//===----------------------------------------------------------------------===//
5383// 12.2. Vector Single-Width Averaging Add and Subtract
5384//===----------------------------------------------------------------------===//
5385let Uses = [VXRM], hasSideEffects = 1 in {
5386  defm PseudoVAADDU : VPseudoVAALU_VV_VX;
5387  defm PseudoVAADD  : VPseudoVAALU_VV_VX;
5388  defm PseudoVASUBU : VPseudoVAALU_VV_VX;
5389  defm PseudoVASUB  : VPseudoVAALU_VV_VX;
5390}
5391
5392//===----------------------------------------------------------------------===//
5393// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
5394//===----------------------------------------------------------------------===//
5395let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
5396  defm PseudoVSMUL : VPseudoVSMUL_VV_VX;
5397}
5398
5399//===----------------------------------------------------------------------===//
5400// 12.4. Vector Single-Width Scaling Shift Instructions
5401//===----------------------------------------------------------------------===//
5402let Uses = [VXRM], hasSideEffects = 1 in {
5403  defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI<uimm5>;
5404  defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI<uimm5>;
5405}
5406
5407//===----------------------------------------------------------------------===//
5408// 12.5. Vector Narrowing Fixed-Point Clip Instructions
5409//===----------------------------------------------------------------------===//
5410let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
5411  defm PseudoVNCLIP  : VPseudoVNCLP_WV_WX_WI;
5412  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI;
5413}
5414
5415} // Predicates = [HasVInstructions]
5416
5417//===----------------------------------------------------------------------===//
5418// 13. Vector Floating-Point Instructions
5419//===----------------------------------------------------------------------===//
5420
5421let Predicates = [HasVInstructionsAnyF] in {
5422//===----------------------------------------------------------------------===//
5423// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
5424//===----------------------------------------------------------------------===//
5425let Uses = [FRM], mayRaiseFPException = true in {
5426defm PseudoVFADD  : VPseudoVALU_VV_VF;
5427defm PseudoVFSUB  : VPseudoVALU_VV_VF;
5428defm PseudoVFRSUB : VPseudoVALU_VF;
5429}
5430
5431//===----------------------------------------------------------------------===//
5432// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
5433//===----------------------------------------------------------------------===//
5434let Uses = [FRM], mayRaiseFPException = true in {
5435defm PseudoVFWADD : VPseudoVFWALU_VV_VF;
5436defm PseudoVFWSUB : VPseudoVFWALU_VV_VF;
5437defm PseudoVFWADD : VPseudoVFWALU_WV_WF;
5438defm PseudoVFWSUB : VPseudoVFWALU_WV_WF;
5439}
5440
5441//===----------------------------------------------------------------------===//
5442// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
5443//===----------------------------------------------------------------------===//
5444let Uses = [FRM], mayRaiseFPException = true in {
5445defm PseudoVFMUL  : VPseudoVFMUL_VV_VF;
5446defm PseudoVFDIV  : VPseudoVFDIV_VV_VF;
5447defm PseudoVFRDIV : VPseudoVFRDIV_VF;
5448}
5449
5450//===----------------------------------------------------------------------===//
5451// 13.5. Vector Widening Floating-Point Multiply
5452//===----------------------------------------------------------------------===//
5453let Uses = [FRM], mayRaiseFPException = true in {
5454defm PseudoVFWMUL : VPseudoVWMUL_VV_VF;
5455}
5456
5457//===----------------------------------------------------------------------===//
5458// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
5459//===----------------------------------------------------------------------===//
5460let Uses = [FRM], mayRaiseFPException = true in {
5461defm PseudoVFMACC  : VPseudoVMAC_VV_VF_AAXA;
5462defm PseudoVFNMACC : VPseudoVMAC_VV_VF_AAXA;
5463defm PseudoVFMSAC  : VPseudoVMAC_VV_VF_AAXA;
5464defm PseudoVFNMSAC : VPseudoVMAC_VV_VF_AAXA;
5465defm PseudoVFMADD  : VPseudoVMAC_VV_VF_AAXA;
5466defm PseudoVFNMADD : VPseudoVMAC_VV_VF_AAXA;
5467defm PseudoVFMSUB  : VPseudoVMAC_VV_VF_AAXA;
5468defm PseudoVFNMSUB : VPseudoVMAC_VV_VF_AAXA;
5469}
5470
5471//===----------------------------------------------------------------------===//
5472// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
5473//===----------------------------------------------------------------------===//
5474let Uses = [FRM], mayRaiseFPException = true in {
5475defm PseudoVFWMACC  : VPseudoVWMAC_VV_VF;
5476defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF;
5477defm PseudoVFWMSAC  : VPseudoVWMAC_VV_VF;
5478defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF;
5479}
5480
5481//===----------------------------------------------------------------------===//
5482// 13.8. Vector Floating-Point Square-Root Instruction
5483//===----------------------------------------------------------------------===//
5484let Uses = [FRM], mayRaiseFPException = true in
5485defm PseudoVFSQRT : VPseudoVSQR_V;
5486
5487//===----------------------------------------------------------------------===//
5488// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
5489//===----------------------------------------------------------------------===//
5490let mayRaiseFPException = true in
5491defm PseudoVFRSQRT7 : VPseudoVRCP_V;
5492
5493//===----------------------------------------------------------------------===//
5494// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
5495//===----------------------------------------------------------------------===//
5496let Uses = [FRM], mayRaiseFPException = true in
5497defm PseudoVFREC7 : VPseudoVRCP_V;
5498
5499//===----------------------------------------------------------------------===//
5500// 13.11. Vector Floating-Point Min/Max Instructions
5501//===----------------------------------------------------------------------===//
5502let mayRaiseFPException = true in {
5503defm PseudoVFMIN : VPseudoVMAX_VV_VF;
5504defm PseudoVFMAX : VPseudoVMAX_VV_VF;
5505}
5506
5507//===----------------------------------------------------------------------===//
5508// 13.12. Vector Floating-Point Sign-Injection Instructions
5509//===----------------------------------------------------------------------===//
5510defm PseudoVFSGNJ  : VPseudoVSGNJ_VV_VF;
5511defm PseudoVFSGNJN : VPseudoVSGNJ_VV_VF;
5512defm PseudoVFSGNJX : VPseudoVSGNJ_VV_VF;
5513
5514//===----------------------------------------------------------------------===//
5515// 13.13. Vector Floating-Point Compare Instructions
5516//===----------------------------------------------------------------------===//
5517let mayRaiseFPException = true in {
5518defm PseudoVMFEQ : VPseudoVCMPM_VV_VF;
5519defm PseudoVMFNE : VPseudoVCMPM_VV_VF;
5520defm PseudoVMFLT : VPseudoVCMPM_VV_VF;
5521defm PseudoVMFLE : VPseudoVCMPM_VV_VF;
5522defm PseudoVMFGT : VPseudoVCMPM_VF;
5523defm PseudoVMFGE : VPseudoVCMPM_VF;
5524}
5525
5526//===----------------------------------------------------------------------===//
5527// 13.14. Vector Floating-Point Classify Instruction
5528//===----------------------------------------------------------------------===//
5529defm PseudoVFCLASS : VPseudoVCLS_V;
5530
5531//===----------------------------------------------------------------------===//
5532// 13.15. Vector Floating-Point Merge Instruction
5533//===----------------------------------------------------------------------===//
5534defm PseudoVFMERGE : VPseudoVMRG_FM;
5535
5536//===----------------------------------------------------------------------===//
5537// 13.16. Vector Floating-Point Move Instruction
5538//===----------------------------------------------------------------------===//
5539defm PseudoVFMV_V : VPseudoVMV_F;
5540
5541//===----------------------------------------------------------------------===//
5542// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
5543//===----------------------------------------------------------------------===//
5544let mayRaiseFPException = true in {
5545let Uses = [FRM] in {
5546defm PseudoVFCVT_XU_F : VPseudoVCVTI_V;
5547defm PseudoVFCVT_X_F : VPseudoVCVTI_V;
5548}
5549
5550defm PseudoVFCVT_RM_XU_F : VPseudoVCVTI_RM_V;
5551defm PseudoVFCVT_RM_X_F : VPseudoVCVTI_RM_V;
5552
5553defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
5554defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;
5555
5556defm PseudoVFROUND_NOEXCEPT : VPseudoVFROUND_NOEXCEPT_V;
5557let Uses = [FRM] in {
5558defm PseudoVFCVT_F_XU : VPseudoVCVTF_V;
5559defm PseudoVFCVT_F_X : VPseudoVCVTF_V;
5560}
5561defm PseudoVFCVT_RM_F_XU : VPseudoVCVTF_RM_V;
5562defm PseudoVFCVT_RM_F_X  : VPseudoVCVTF_RM_V;
5563} // mayRaiseFPException = true
5564
5565//===----------------------------------------------------------------------===//
5566// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
5567//===----------------------------------------------------------------------===//
5568let mayRaiseFPException = true in {
5569let Uses = [FRM] in {
5570defm PseudoVFWCVT_XU_F     : VPseudoVWCVTI_V;
5571defm PseudoVFWCVT_X_F      : VPseudoVWCVTI_V;
5572}
5573defm PseudoVFWCVT_RM_XU_F  : VPseudoVWCVTI_RM_V;
5574defm PseudoVFWCVT_RM_X_F   : VPseudoVWCVTI_RM_V;
5575
5576defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
5577defm PseudoVFWCVT_RTZ_X_F  : VPseudoVWCVTI_V;
5578
5579let Uses = [FRM] in {
5580defm PseudoVFWCVT_F_XU     : VPseudoVWCVTF_V;
5581defm PseudoVFWCVT_F_X      : VPseudoVWCVTF_V;
5582}
5583defm PseudoVFWCVT_RM_F_XU  : VPseudoVWCVTF_RM_V;
5584defm PseudoVFWCVT_RM_F_X   : VPseudoVWCVTF_RM_V;
5585
5586defm PseudoVFWCVT_F_F      : VPseudoVWCVTD_V;
5587} // mayRaiseFPException = true
5588
5589//===----------------------------------------------------------------------===//
5590// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
5591//===----------------------------------------------------------------------===//
5592let mayRaiseFPException = true in {
5593let Uses = [FRM] in {
5594defm PseudoVFNCVT_XU_F     : VPseudoVNCVTI_W;
5595defm PseudoVFNCVT_X_F      : VPseudoVNCVTI_W;
5596}
5597defm PseudoVFNCVT_RM_XU_F  : VPseudoVNCVTI_RM_W;
5598defm PseudoVFNCVT_RM_X_F   : VPseudoVNCVTI_RM_W;
5599
5600defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
5601defm PseudoVFNCVT_RTZ_X_F  : VPseudoVNCVTI_W;
5602
5603let Uses = [FRM] in {
5604defm PseudoVFNCVT_F_XU     : VPseudoVNCVTF_W;
5605defm PseudoVFNCVT_F_X      : VPseudoVNCVTF_W;
5606}
5607defm PseudoVFNCVT_RM_F_XU  : VPseudoVNCVTF_RM_W;
5608defm PseudoVFNCVT_RM_F_X   : VPseudoVNCVTF_RM_W;
5609
5610let Uses = [FRM] in
5611defm PseudoVFNCVT_F_F      : VPseudoVNCVTD_W;
5612
5613defm PseudoVFNCVT_ROD_F_F  : VPseudoVNCVTD_W;
5614} // mayRaiseFPException = true
5615} // Predicates = [HasVInstructionsAnyF]
5616
5617//===----------------------------------------------------------------------===//
5618// 14. Vector Reduction Operations
5619//===----------------------------------------------------------------------===//
5620
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 14.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm PseudoVREDSUM  : VPseudoVRED_VS;
defm PseudoVREDAND  : VPseudoVRED_VS;
defm PseudoVREDOR   : VPseudoVRED_VS;
defm PseudoVREDXOR  : VPseudoVRED_VS;
defm PseudoVREDMINU : VPseudoVRED_VS;
defm PseudoVREDMIN  : VPseudoVRED_VS;
defm PseudoVREDMAXU : VPseudoVRED_VS;
defm PseudoVREDMAX  : VPseudoVRED_VS;

//===----------------------------------------------------------------------===//
// 14.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// Widening reductions accumulate into a 2*SEW scalar element; the flag marks
// them so that code handling widening reductions can identify these pseudos.
let IsRVVWideningReduction = 1 in {
defm PseudoVWREDSUMU   : VPseudoVWRED_VS;
defm PseudoVWREDSUM    : VPseudoVWRED_VS;
}
} // Predicates = [HasVInstructions]
5642
let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 14.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
// OSUM = ordered (strictly sequential) sum, USUM = unordered sum.  Both sums
// round and therefore read frm.
let Uses = [FRM], mayRaiseFPException = true in {
defm PseudoVFREDOSUM : VPseudoVFREDO_VS;
defm PseudoVFREDUSUM : VPseudoVFRED_VS;
}
// Min/max reductions are exact, so no FRM dependency.
let mayRaiseFPException = true in {
defm PseudoVFREDMIN  : VPseudoVFRED_VS;
defm PseudoVFREDMAX  : VPseudoVFRED_VS;
}

//===----------------------------------------------------------------------===//
// 14.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
let IsRVVWideningReduction = 1,
    Uses = [FRM],
    mayRaiseFPException = true in {
defm PseudoVFWREDUSUM  : VPseudoVFWRED_VS;
defm PseudoVFWREDOSUM  : VPseudoVFWRED_VS;
}

} // Predicates = [HasVInstructionsAnyF]
5667
5668//===----------------------------------------------------------------------===//
5669// 15. Vector Mask Instructions
5670//===----------------------------------------------------------------------===//
5671
5672//===----------------------------------------------------------------------===//
5673// 15.1 Vector Mask-Register Logical Instructions
5674//===----------------------------------------------------------------------===//
5675
5676defm PseudoVMAND: VPseudoVALU_MM;
5677defm PseudoVMNAND: VPseudoVALU_MM;
5678defm PseudoVMANDN: VPseudoVALU_MM;
5679defm PseudoVMXOR: VPseudoVALU_MM;
5680defm PseudoVMOR: VPseudoVALU_MM;
5681defm PseudoVMNOR: VPseudoVALU_MM;
5682defm PseudoVMORN: VPseudoVALU_MM;
5683defm PseudoVMXNOR: VPseudoVALU_MM;
5684
5685// Pseudo instructions
5686defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
5687defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
5688
5689//===----------------------------------------------------------------------===//
5690// 15.2. Vector mask population count vcpop
5691//===----------------------------------------------------------------------===//
5692
5693defm PseudoVCPOP: VPseudoVPOP_M;
5694
5695//===----------------------------------------------------------------------===//
5696// 15.3. vfirst find-first-set mask bit
5697//===----------------------------------------------------------------------===//
5698
5699defm PseudoVFIRST: VPseudoV1ST_M;
5700
5701//===----------------------------------------------------------------------===//
5702// 15.4. vmsbf.m set-before-first mask bit
5703//===----------------------------------------------------------------------===//
5704defm PseudoVMSBF: VPseudoVSFS_M;
5705
5706//===----------------------------------------------------------------------===//
5707// 15.5. vmsif.m set-including-first mask bit
5708//===----------------------------------------------------------------------===//
5709defm PseudoVMSIF: VPseudoVSFS_M;
5710
5711//===----------------------------------------------------------------------===//
5712// 15.6. vmsof.m set-only-first mask bit
5713//===----------------------------------------------------------------------===//
5714defm PseudoVMSOF: VPseudoVSFS_M;
5715
5716//===----------------------------------------------------------------------===//
5717// 15.8.  Vector Iota Instruction
5718//===----------------------------------------------------------------------===//
5719defm PseudoVIOTA_M: VPseudoVIOT_M;
5720
5721//===----------------------------------------------------------------------===//
5722// 15.9. Vector Element Index Instruction
5723//===----------------------------------------------------------------------===//
5724defm PseudoVID : VPseudoVID_V;
5725
5726//===----------------------------------------------------------------------===//
5727// 16. Vector Permutation Instructions
5728//===----------------------------------------------------------------------===//
5729
5730//===----------------------------------------------------------------------===//
5731// 16.1. Integer Scalar Move Instructions
5732//===----------------------------------------------------------------------===//
5733
let Predicates = [HasVInstructions] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  foreach m = MxList in {
    defvar mx = m.MX;
    // Per-LMUL scheduling resources looked up by name suffix.
    defvar WriteVIMovVX_MX = !cast<SchedWrite>("WriteVIMovVX_" # mx);
    defvar WriteVIMovXV_MX = !cast<SchedWrite>("WriteVIMovXV_" # mx);
    defvar ReadVIMovVX_MX = !cast<SchedRead>("ReadVIMovVX_" # mx);
    defvar ReadVIMovXV_MX = !cast<SchedRead>("ReadVIMovXV_" # mx);
    defvar ReadVIMovXX_MX = !cast<SchedRead>("ReadVIMovXX_" # mx);
    let VLMul = m.value in {
      // vmv.x.s pseudo: vector -> GPR scalar move.  Takes only a SEW operand
      // (HasSEWOp), no VL operand, since a single element is transferred.
      let HasSEWOp = 1, BaseInstr = VMV_X_S in
      def PseudoVMV_X_S # "_" # mx:
        Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
        Sched<[WriteVIMovVX_MX, ReadVIMovVX_MX]>,
        RISCVVPseudo;
      // vmv.s.x pseudo: GPR -> vector element 0.  $rd is tied to $rs1 so the
      // destination elements not written come from the incoming value
      // (passthru-style tie).
      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
          Constraints = "$rd = $rs1" in
      def PseudoVMV_S_X # "_" # mx: Pseudo<(outs m.vrclass:$rd),
                                             (ins m.vrclass:$rs1, GPR:$rs2,
                                                  AVL:$vl, ixlenimm:$sew),
                                             []>,
        Sched<[WriteVIMovXV_MX, ReadVIMovXV_MX, ReadVIMovXX_MX]>,
        RISCVVPseudo;
    }
  }
}
} // Predicates = [HasVInstructions]
5761
5762//===----------------------------------------------------------------------===//
5763// 16.2. Floating-Point Scalar Move Instructions
5764//===----------------------------------------------------------------------===//
5765
let Predicates = [HasVInstructionsAnyF] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  // Outer loop over FP element widths (f.FX names the FPR class suffix),
  // inner loop over the LMULs legal for that width.
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMovVF_MX = !cast<SchedWrite>("WriteVFMovVF_" # mx);
      defvar WriteVFMovFV_MX = !cast<SchedWrite>("WriteVFMovFV_" # mx);
      defvar ReadVFMovVF_MX = !cast<SchedRead>("ReadVFMovVF_" # mx);
      defvar ReadVFMovFV_MX = !cast<SchedRead>("ReadVFMovFV_" # mx);
      defvar ReadVFMovFX_MX = !cast<SchedRead>("ReadVFMovFX_" # mx);
      let VLMul = m.value in {
        // vfmv.f.s pseudo: vector -> FPR scalar move.  SEW operand only,
        // no VL operand (mirrors PseudoVMV_X_S above).
        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
        def "PseudoVFMV_" # f.FX # "_S_" # mx :
          Pseudo<(outs f.fprclass:$rd),
                 (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
          Sched<[WriteVFMovVF_MX, ReadVFMovVF_MX]>,
          RISCVVPseudo;
        // vfmv.s.f pseudo: FPR -> vector element 0, destination tied to the
        // incoming vector operand ($rd = $rs1).
        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
            Constraints = "$rd = $rs1" in
        def "PseudoVFMV_S_" # f.FX # "_" # mx :
                                          Pseudo<(outs m.vrclass:$rd),
                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
                                                      AVL:$vl, ixlenimm:$sew),
                                                 []>,
          Sched<[WriteVFMovFV_MX, ReadVFMovFV_MX, ReadVFMovFX_MX]>,
          RISCVVPseudo;
      }
    }
  }
}
} // Predicates = [HasVInstructionsAnyF]
5797
5798//===----------------------------------------------------------------------===//
5799// 16.3. Vector Slide Instructions
5800//===----------------------------------------------------------------------===//
5801let Predicates = [HasVInstructions] in {
5802  defm PseudoVSLIDEUP    : VPseudoVSLD_VX_VI<uimm5, "@earlyclobber $rd">;
5803  defm PseudoVSLIDEDOWN  : VPseudoVSLD_VX_VI<uimm5>;
5804  defm PseudoVSLIDE1UP   : VPseudoVSLD1_VX<"@earlyclobber $rd">;
5805  defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
5806} // Predicates = [HasVInstructions]
5807
5808let Predicates = [HasVInstructionsAnyF] in {
5809  defm PseudoVFSLIDE1UP  : VPseudoVSLD1_VF<"@earlyclobber $rd">;
5810  defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
5811} // Predicates = [HasVInstructionsAnyF]
5812
5813//===----------------------------------------------------------------------===//
5814// 16.4. Vector Register Gather Instructions
5815//===----------------------------------------------------------------------===//
5816defm PseudoVRGATHER     : VPseudoVGTR_VV_VX_VI<uimm5, "@earlyclobber $rd">;
5817defm PseudoVRGATHEREI16 : VPseudoVGTR_VV_EEW</* eew */ 16, "@earlyclobber $rd">;
5818
5819//===----------------------------------------------------------------------===//
5820// 16.5. Vector Compress Instruction
5821//===----------------------------------------------------------------------===//
5822defm PseudoVCOMPRESS : VPseudoVCPR_V;
5823
5824//===----------------------------------------------------------------------===//
5825// Patterns.
5826//===----------------------------------------------------------------------===//
5827
5828//===----------------------------------------------------------------------===//
5829// 11. Vector Integer Arithmetic Instructions
5830//===----------------------------------------------------------------------===//
5831
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 11.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
// vsub has no .vi form (a subtract-immediate is canonicalized to vadd with
// the negated immediate elsewhere).
defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
// vrsub has no .vv form: a vector-vector reverse subtract is just vsub with
// the operands swapped, so only .vx/.vi patterns are needed.
defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 11.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
// W_VV_VX: 2*SEW result from two SEW sources. W_WV_WX: 2*SEW result from one
// 2*SEW and one SEW source (the ".w" intrinsic variants).
defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 11.3. Vector Integer Extension
//===----------------------------------------------------------------------===//
// VF2/VF4/VF8 name the source-to-destination width ratio: the source has
// SEW/2, SEW/4 or SEW/8 elements relative to the destination SEW.
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
                     AllFractionableVF2IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
                     AllFractionableVF4IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
                     AllFractionableVF8IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
                     AllFractionableVF2IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
                     AllFractionableVF4IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
                     AllFractionableVF8IntVectors>;

//===----------------------------------------------------------------------===//
// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
//===----------------------------------------------------------------------===//
// vadc produces a vector result; vmadc produces a mask (M_* multiclasses),
// with and without a carry-in operand.
defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;

// Subtract-with-borrow has no immediate forms.
defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;

//===----------------------------------------------------------------------===//
// 11.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 11.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
// Shift amounts are unsigned 5-bit immediates (uimm5), unlike the signed
// immediates used by most other .vi forms.
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
                            uimm5>;
5895
foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  // vsll.vi vd, vs1, 1 computes vs1 * 2 == vs1 + vs1, so select
  // vadd.vv vd, vs1, vs1 instead. The unmasked pattern matches only when the
  // passthru is undef; the selected PseudoVADD_VV carries no merge operand.
  def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef),
                                        (vti.Vector vti.RegClass:$rs1),
                                        (XLenVT 1), VLOpFrag)),
            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                              vti.RegClass:$rs1,
                                                              GPR:$vl,
                                                              vti.Log2SEW)>;
  // Masked form of the same strength reduction: the merge operand, mask (V0),
  // VL/SEW and the tail/mask policy of the original vsll are forwarded to the
  // _MASK add pseudo unchanged.
  def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (XLenVT 1),
                                             (vti.Mask V0),
                                             VLOpFrag,
                                             (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
                                                        vti.RegClass:$merge,
                                                        vti.RegClass:$rs1,
                                                        vti.RegClass:$rs1,
                                                        (vti.Mask V0),
                                                        GPR:$vl,
                                                        vti.Log2SEW,
                                                        (XLenVT timm:$policy))>;
}
5920
5921//===----------------------------------------------------------------------===//
5922// 11.7. Vector Narrowing Integer Right Shift Instructions
5923//===----------------------------------------------------------------------===//
5924defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
5925defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
5926
5927//===----------------------------------------------------------------------===//
5928// 11.8. Vector Integer Comparison Instructions
5929//===----------------------------------------------------------------------===//
5930defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
5931defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
5932defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
5933defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
5934defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
5935defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;
5936
5937defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
5938defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
5939
5940// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
5941defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
5942defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
5943
5944defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
5945defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
5946
5947// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16 and
5948// non-zero. Zero can be .vx with x0. This avoids the user needing to know that
5949// there is no vmslt(u).vi instruction. Similar for vmsge(u).vx intrinsics
5950// using vmslt(u).vi.
5951defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE", simm5_plus1_nonzero>;
5952defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
5953
5954// We need to handle 0 for vmsge.vi using vmslt.vi because there is no vmsge.vx.
5955defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT", simm5_plus1>;
5956defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
5957
5958//===----------------------------------------------------------------------===//
5959// 11.9. Vector Integer Min/Max Instructions
5960//===----------------------------------------------------------------------===//
5961defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
5962defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
5963defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
5964defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
5965
5966//===----------------------------------------------------------------------===//
5967// 11.10. Vector Single-Width Integer Multiply Instructions
5968//===----------------------------------------------------------------------===//
5969defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
5970defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>;
5971defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>;
5972defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>;
5973
5974//===----------------------------------------------------------------------===//
5975// 11.11. Vector Integer Divide Instructions
5976//===----------------------------------------------------------------------===//
5977defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>;
5978defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>;
5979defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>;
5980defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;
5981
5982//===----------------------------------------------------------------------===//
5983// 11.12. Vector Widening Integer Multiply Instructions
5984//===----------------------------------------------------------------------===//
5985defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
5986defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
5987defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
5988
5989//===----------------------------------------------------------------------===//
5990// 11.13. Vector Single-Width Integer Multiply-Add Instructions
5991//===----------------------------------------------------------------------===//
5992defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
5993defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
5994defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
5995defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
5996
5997//===----------------------------------------------------------------------===//
5998// 11.14. Vector Widening Integer Multiply-Add Instructions
5999//===----------------------------------------------------------------------===//
6000defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
6001defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
6002defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
6003defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
6004
6005//===----------------------------------------------------------------------===//
6006// 11.15. Vector Integer Merge Instructions
6007//===----------------------------------------------------------------------===//
6008defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
6009
6010//===----------------------------------------------------------------------===//
6011// 11.16. Vector Integer Move Instructions
6012//===----------------------------------------------------------------------===//
6013foreach vti = AllVectors in {
6014  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector undef),
6015                                           (vti.Vector vti.RegClass:$rs1),
6016                                           VLOpFrag)),
6017            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
6018             $rs1, GPR:$vl, vti.Log2SEW)>;
6019  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
6020                                           (vti.Vector vti.RegClass:$rs1),
6021                                           VLOpFrag)),
6022            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX#"_TU")
6023             $passthru, $rs1, GPR:$vl, vti.Log2SEW)>;
6024
6025  // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
6026}
6027
6028//===----------------------------------------------------------------------===//
6029// 12. Vector Fixed-Point Arithmetic Instructions
6030//===----------------------------------------------------------------------===//
6031
6032//===----------------------------------------------------------------------===//
6033// 12.1. Vector Single-Width Saturating Add and Subtract
6034//===----------------------------------------------------------------------===//
6035defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
6036defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
6037defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
6038defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
6039
6040//===----------------------------------------------------------------------===//
6041// 12.2. Vector Single-Width Averaging Add and Subtract
6042//===----------------------------------------------------------------------===//
6043defm : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
6044defm : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
6045defm : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
6046defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;
6047
6048//===----------------------------------------------------------------------===//
6049// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
6050//===----------------------------------------------------------------------===//
6051defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;
6052
6053//===----------------------------------------------------------------------===//
6054// 12.4. Vector Single-Width Scaling Shift Instructions
6055//===----------------------------------------------------------------------===//
6056defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
6057                            uimm5>;
6058defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
6059                            uimm5>;
6060
6061//===----------------------------------------------------------------------===//
6062// 12.5. Vector Narrowing Fixed-Point Clip Instructions
6063//===----------------------------------------------------------------------===//
6064defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
6065defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;
6066
6067} // Predicates = [HasVInstructions]
6068
6069//===----------------------------------------------------------------------===//
6070// 13. Vector Floating-Point Instructions
6071//===----------------------------------------------------------------------===//
6072
let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
// vfrsub only has a scalar (.vf) form; the vector-vector case is vfsub with
// operands swapped.
defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>;
// ".w" variants take one already-widened (2*SEW) operand.
defm : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>;
// Reverse divide: scalar-only, like vfrsub above.
defm : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 13.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryW_VV_VX<"int_riscv_vfwmacc", "PseudoVFWMACC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmacc", "PseudoVFWNMACC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwmsac", "PseudoVFWMSAC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>;
6120
6121//===----------------------------------------------------------------------===//
6122// 13.8. Vector Floating-Point Square-Root Instruction
6123//===----------------------------------------------------------------------===//
6124defm : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;
6125
6126//===----------------------------------------------------------------------===//
6127// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
6128//===----------------------------------------------------------------------===//
6129defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;
6130
6131//===----------------------------------------------------------------------===//
6132// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
6133//===----------------------------------------------------------------------===//
6134defm : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;
6135
6136//===----------------------------------------------------------------------===//
6137// 13.11. Vector Floating-Point Min/Max Instructions
6138//===----------------------------------------------------------------------===//
6139defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
6140defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;
6141
6142//===----------------------------------------------------------------------===//
6143// 13.12. Vector Floating-Point Sign-Injection Instructions
6144//===----------------------------------------------------------------------===//
6145defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
6146defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
6147defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;
6148
6149//===----------------------------------------------------------------------===//
6150// 13.13. Vector Floating-Point Compare Instructions
6151//===----------------------------------------------------------------------===//
6152defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
6153defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
6154defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
6155defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
6156defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
6157defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
6158defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
6159defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;
6160
6161//===----------------------------------------------------------------------===//
6162// 13.14. Vector Floating-Point Classify Instruction
6163//===----------------------------------------------------------------------===//
6164defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
6165
6166//===----------------------------------------------------------------------===//
6167// 13.15. Vector Floating-Point Merge Instruction
6168//===----------------------------------------------------------------------===//
6169// We can use vmerge.vvm to support vector-vector vfmerge.
6170// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
6171// int_riscv_vmerge. Support both for compatibility.
6172defm : VPatBinaryV_VM_TAIL<"int_riscv_vmerge", "PseudoVMERGE",
6173                           /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
6174defm : VPatBinaryV_VM_TAIL<"int_riscv_vfmerge", "PseudoVMERGE",
6175                           /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
6176defm : VPatBinaryV_XM_TAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
6177                           /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
6178
6179foreach fvti = AllFloatVectors in {
6180  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
6181  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector undef),
6182                                            (fvti.Vector fvti.RegClass:$rs2),
6183                                            (fvti.Scalar (fpimm0)),
6184                                            (fvti.Mask V0), VLOpFrag)),
6185            (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
6186  defvar instr_tu = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU");
6187  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
6188                                            (fvti.Vector fvti.RegClass:$rs2),
6189                                            (fvti.Scalar (fpimm0)),
6190                                            (fvti.Mask V0), VLOpFrag)),
6191            (instr_tu fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
6192                      (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
6193}
6194
6195//===----------------------------------------------------------------------===//
6196// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
6197//===----------------------------------------------------------------------===//
6198defm : VPatConversionVI_VF<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
6199defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
6200defm : VPatConversionVI_VF<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
6201defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
6202defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">;
6203defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">;
6204
6205//===----------------------------------------------------------------------===//
6206// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
6207//===----------------------------------------------------------------------===//
6208defm : VPatConversionWI_VF<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
6209defm : VPatConversionWI_VF<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
6210defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
6211defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
6212defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
6213defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
6214defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;
6215
6216//===----------------------------------------------------------------------===//
6217// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
6218//===----------------------------------------------------------------------===//
6219defm : VPatConversionVI_WF<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
6220defm : VPatConversionVI_WF<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
6221defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
6222defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
6223defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
6224defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
6225defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
6226defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
6227} // Predicates = [HasVInstructionsAnyF]
6228
6229//===----------------------------------------------------------------------===//
6230// 14. Vector Reduction Operations
6231//===----------------------------------------------------------------------===//
6232
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 14.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// .vs form: reduces a vector into element 0 of the destination, seeded by
// element 0 of the second source.
defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;

//===----------------------------------------------------------------------===//
// 14.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// Widening reductions accumulate into a 2*SEW scalar result.
defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
} // Predicates = [HasVInstructions]
6252
let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 14.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
// "o" = ordered (sequential) sum, "u" = unordered sum.
defm : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredusum", "PseudoVFREDUSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;

//===----------------------------------------------------------------------===//
// 14.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionW_VS<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", /*IsFloat=*/1>;
defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;

} // Predicates = [HasVInstructionsAnyF]
6269
6270//===----------------------------------------------------------------------===//
6271// 15. Vector Mask Instructions
6272//===----------------------------------------------------------------------===//
6273
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 15.1 Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;

// vmclr.m/vmset.m take no source operands (nullary patterns); they are the
// canonical mask clear/set pseudo-instructions.
defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;

//===----------------------------------------------------------------------===//
// 15.2. Vector count population in mask vcpop.m
//===----------------------------------------------------------------------===//
// Scalar (GPR) result from a mask source.
defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;

//===----------------------------------------------------------------------===//
// 15.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;

//===----------------------------------------------------------------------===//
// 15.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;

//===----------------------------------------------------------------------===//
// 15.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;

//===----------------------------------------------------------------------===//
// 15.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;

//===----------------------------------------------------------------------===//
// 15.8.  Vector Iota Instruction
//===----------------------------------------------------------------------===//
// Vector result from a mask source.
defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;

//===----------------------------------------------------------------------===//
// 15.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;

} // Predicates = [HasVInstructions]
6327
6328//===----------------------------------------------------------------------===//
6329// 16. Vector Permutation Instructions
6330//===----------------------------------------------------------------------===//
6331
6332//===----------------------------------------------------------------------===//
6333// 16.1. Integer Scalar Move Instructions
6334//===----------------------------------------------------------------------===//
6335
let Predicates = [HasVInstructions] in {
// Select vmv.x.s for every integer vector type: extract element 0 into a GPR.
// The pseudo only needs the source register and SEW; vmv.x.s ignores vl/LMUL
// for the value it reads, so no VL operand appears in the pattern.
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
}
} // Predicates = [HasVInstructions]
6343
6344//===----------------------------------------------------------------------===//
6345// 16.2. Floating-Point Scalar Move Instructions
6346//===----------------------------------------------------------------------===//
6347
let Predicates = [HasVInstructionsAnyF] in {
foreach fvti = AllFloatVectors in {
  // vfmv.s.f: insert FP scalar rs2 into element 0 of rs1 (tail preserved via
  // the tied rs1 operand). ScalarSuffix picks F16/F32/F64 by element type.
  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                                fvti.LMul.MX)
             (fvti.Vector $rs1),
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             GPR:$vl, fvti.Log2SEW)>;

  // Optimization: a +0.0 immediate has an all-zero bit pattern, so it can be
  // inserted with the integer vmv.s.x from X0, avoiding materializing the
  // constant in an FP register.
  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar (fpimm0)), VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
             (fvti.Vector $rs1), X0, GPR:$vl, fvti.Log2SEW)>;
}
} // Predicates = [HasVInstructionsAnyF]
6364
6365//===----------------------------------------------------------------------===//
6366// 16.3. Vector Slide Instructions
6367//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  // vslideup/vslidedown are ternary (tied dest acts as the merge source) and
  // accept either a GPR offset (VX) or a 5-bit immediate offset (VI, uimm5).
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
  // vslide1up/vslide1down shift by one and insert a scalar GPR, so only the
  // VX form exists.
  defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
} // Predicates = [HasVInstructions]
6374
let Predicates = [HasVInstructionsAnyF] in {
  // FP vectors reuse the integer vslideup/vslidedown pseudos (slides move
  // whole elements regardless of type); only the slide1 variants are
  // FP-specific since they insert a scalar FP register.
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]
6381
6382//===----------------------------------------------------------------------===//
6383// 16.4. Vector Register Gather Instructions
6384//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  // vrgather: index operand may be a vector (VV), GPR (VX), or 5-bit
  // immediate (VI). The _INT suffix marks classes where the index vector is
  // integer-typed even for non-integer data.
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllIntegerVectors, uimm5>;
  // vrgatherei16: indices always use EEW=16 regardless of the data SEW.
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllIntegerVectors>;
} // Predicates = [HasVInstructions]
6391
let Predicates = [HasVInstructionsAnyF] in {
  // Same gather patterns instantiated for FP element types; the index
  // operand remains an integer vector.
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]
6398
6399//===----------------------------------------------------------------------===//
6400// 16.5. Vector Compress Instruction
6401//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  // vcompress takes an explicit mask register operand (not the implicit v0
  // mask), hence the AnyMask pattern class.
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
} // Predicates = [HasVInstructions]
6405
let Predicates = [HasVInstructionsAnyF] in {
  // Same vcompress patterns instantiated for FP element types.
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
} // Predicates = [HasVInstructionsAnyF]
6409
6410// Include the non-intrinsic ISel patterns
6411include "RISCVInstrInfoVVLPatterns.td"
6412include "RISCVInstrInfoVSDPatterns.td"
6413