//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure to support code generation
/// for the standard 'V' (Vector) extension, version 0.10.  This version is still
/// experimental as the 'V' extension hasn't been ratified yet.
///
/// This file is included from RISCVInstrInfoV.td
///
//===----------------------------------------------------------------------===//

def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;

// Operand that is allowed to be a register or a 5-bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
def AVL : RegisterOperand<GPR> {
  let OperandNamespace = "RISCVOp";
  let OperandType = "OPERAND_AVL";
}

// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;

def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;
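
// A hedged usage sketch (the names below are hypothetical, not records from
// this file): DecImm lets an output pattern emit imm-1 for a matched
// immediate, e.g. to rewrite "rs1 < imm" as "rs1 <= imm-1":
//   def : Pat<(setlt vti.RegClass:$rs1, (SplatPat simm5_plus1:$imm)),
//             (!cast<Instruction>("PseudoVMSLE_VI_" # vti.LMul.MX)
//                  vti.RegClass:$rs1, (DecImm simm5_plus1:$imm),
//                  vti.AVL, vti.Log2SEW)>;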

//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//

// This class describes information associated with an LMUL.
class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;
  VReg wvrclass = wregclass;
  VReg f8vrclass = f8regclass;
  VReg f4vrclass = f4regclass;
  VReg f2vrclass = f2regclass;
  string MX = mx;
  int octuple = oct;
}

// Associate LMUL with tablegen records of register classes.
def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;

// Used to iterate over all possible LMULs.
def MxList {
  list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
}
// Used for widening and narrowing instructions as it doesn't contain M8.
def MxListW {
  list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
}
// Used for zext/sext.vf2.
def MxListVF2 {
  list<LMULInfo> m = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
}
// Used for zext/sext.vf4.
def MxListVF4 {
  list<LMULInfo> m = [V_MF2, V_M1, V_M2, V_M4, V_M8];
}
// Used for zext/sext.vf8.
def MxListVF8 {
  list<LMULInfo> m = [V_M1, V_M2, V_M4, V_M8];
}

class FPR_Info<RegisterClass regclass, string fx> {
  RegisterClass fprclass = regclass;
  string FX = fx;
}

def SCALAR_F16 : FPR_Info<FPR16, "F16">;
def SCALAR_F32 : FPR_Info<FPR32, "F32">;
def SCALAR_F64 : FPR_Info<FPR64, "F64">;

def FPList {
  list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
}
// Used for widening instructions. It excludes F64.
def FPListW {
  list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32];
}

class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}
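
// A worked note (assuming the standard ELEN=64 element types): the fractional
// LMULs dropped for each EEW are exactly those where SEW/LMUL would exceed 64
// and thus have no corresponding vector MVT. For example, eew=32 with MF8
// would give SEW/LMUL = 256, so MF8 and MF4 are omitted from the eew=32 list.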

class NFSet<LMULInfo m> {
  list<int> L = !cond(!eq(m.value, V_M8.value): [],
                      !eq(m.value, V_M4.value): [2],
                      !eq(m.value, V_M2.value): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}

class log2<int num> {
  int val = !if(!eq(num, 1), 0, !add(1, log2<!srl(num, 1)>.val));
}

class octuple_to_str<int octuple> {
  string ret = !if(!eq(octuple, 1), "MF8",
                   !if(!eq(octuple, 2), "MF4",
                   !if(!eq(octuple, 4), "MF2",
                   !if(!eq(octuple, 8), "M1",
                   !if(!eq(octuple, 16), "M2",
                   !if(!eq(octuple, 32), "M4",
                   !if(!eq(octuple, 64), "M8",
                   "NoDef")))))));
}
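
// Worked examples (comments only, so they do not affect the build):
//   log2<16>.val           // = 4, via 16 -> 8 -> 4 -> 2 -> 1
//   octuple_to_str<16>.ret // = "M2", i.e. LMUL = 16/8 = 2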

def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;

// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
def VLMax : OutPatFrag<(ops), (XLenVT X0)>;

// List of EEW.
defvar EEWList = [8, 16, 32, 64];

class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}
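
// For example, SegRegClass<V_MF4, 4>.RC is VRN4M1 (fractional LMULs share the
// M1 segment register classes), while SegRegClass<V_M2, 3>.RC is VRN3M2.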

//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//

class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
{
  ValueType Vector = Vec;
  ValueType Mask = Mas;
  int SEW = Sew;
  int Log2SEW = log2<Sew>.val;
  VReg RegClass = Reg;
  LMULInfo LMul = M;
  ValueType Scalar = Scal;
  RegisterClass ScalarRegClass = ScalarReg;
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "F16",
                              !eq(Scal, f32) : "F32",
                              !eq(Scal, f64) : "F64");
}

class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
{
  ValueType VectorM1 = VecM1;
}

defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
        def VI8MF8: VTypeInfo<vint8mf8_t,  vbool64_t,  8, VR, V_MF8>;
        def VI8MF4: VTypeInfo<vint8mf4_t,  vbool32_t,  8, VR, V_MF4>;
        def VI8MF2: VTypeInfo<vint8mf2_t,  vbool16_t,  8, VR, V_MF2>;
        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
      }
      def VI8M1: VTypeInfo<vint8m1_t,   vbool8_t,   8, VR, V_M1>;
      def VI16M1: VTypeInfo<vint16m1_t,  vbool16_t, 16, VR, V_M1>;
      def VI32M1: VTypeInfo<vint32m1_t,  vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t,  vbool64_t, 64, VR, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      defset list<VTypeInfo> FractionalGroupFloatVectors = {
        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
        def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      }
      def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, VR, V_M1, f16, FPR16>;
      def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1, f32, FPR32>;
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                  VRM2, V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                  VRM4, V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                  VRM8, V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                  VRM2, V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
                                  VRM4, V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
                                  VRM8, V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                  VRM2, V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                  VRM4, V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
                                  VRM8, V_M8, f64, FPR64>;
    }
  }
}
// This functor is used to obtain the integer vector type that has the same
// SEW and LMUL multiplier as the input parameter type.
class GetIntVTypeInfo<VTypeInfo vti>
{
  // Equivalent integer vector type. E.g.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}

class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // We assume SEW=1 and set the corresponding LMUL. vsetvli insertion will
  // look for SEW=1 to optimize based on surrounding instructions.
  int SEW = 1;
  int Log2SEW = 0;
  LMULInfo LMul = M;
  string BX = Bx; // Suffix for mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}

defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, where <n> = SEW/LMUL; we assume SEW=8 and the corresponding
  // LMUL.
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}

class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}

defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}

defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}

defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}

defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}

defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}

defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}

// This class holds the records of the RISCVVPseudosTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVBaseInfo.h.
class CONST8b<bits<8> val> {
  bits<8> V = val;
}
def InvalidIndex : CONST8b<0x80>;
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  Instruction BaseInstr;
}

// The actual table.
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
  let PrimaryKeyEarlyOut = true;
}
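
// A lookup sketch from C++ (assuming the accessor that SearchableTables
// emits into RISCVGenSearchableTables.inc from PrimaryKeyName above):
//   if (const PseudoInfo *Info = getPseudoInfo(MI.getOpcode()))
//     unsigned BaseOpcode = Info->BaseInstr;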

def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "SplatOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}

class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLETable : GenericTable {
  let FilterClass = "RISCVVLE";
  let CppTypeName = "VLEPseudo";
  let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLEPseudo";
}
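
// Instruction selection can then pick a load pseudo by its properties, e.g.
// (a sketch, assuming the generated accessor and numeric key encodings):
//   const VLEPseudo *P =
//       getVLEPseudo(IsMasked, IsStrided, IsFF, Log2SEW, LMUL);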

class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSETable : GenericTable {
  let FilterClass = "RISCVVSE";
  let CppTypeName = "VSEPseudo";
  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSEPseudo";
}

class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;

class RISCVVLX_VSXTable : GenericTable {
  let CppTypeName = "VLX_VSXPseudo";
  let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}

def RISCVVLXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVLX";
  let PrimaryKeyName = "getVLXPseudo";
}

def RISCVVSXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVSX";
  let PrimaryKeyName = "getVSXPseudo";
}

class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLSEGTable : GenericTable {
  let FilterClass = "RISCVVLSEG";
  let CppTypeName = "VLSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLSEGPseudo";
}

class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLXSEGTable : GenericTable {
  let FilterClass = "RISCVVLXSEG";
  let CppTypeName = "VLXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVLXSEGPseudo";
}

class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSSEGTable : GenericTable {
  let FilterClass = "RISCVVSSEG";
  let CppTypeName = "VSSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSSEGPseudo";
}

class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSXSEGTable : GenericTable {
  let FilterClass = "RISCVVSXSEG";
  let CppTypeName = "VSXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVSXSEGPseudo";
}

//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//

class PseudoToVInst<string PseudoInst> {
  string VInst = !subst("_M8", "",
                 !subst("_M4", "",
                 !subst("_M2", "",
                 !subst("_M1", "",
                 !subst("_MF2", "",
                 !subst("_MF4", "",
                 !subst("_MF8", "",
                 !subst("_B1", "",
                 !subst("_B2", "",
                 !subst("_B4", "",
                 !subst("_B8", "",
                 !subst("_B16", "",
                 !subst("_B32", "",
                 !subst("_B64", "",
                 !subst("_MASK", "",
                 !subst("_COMMUTABLE", "",
                 !subst("_TA", "",
                 !subst("_TIED", "",
                 !subst("F16", "F",
                 !subst("F32", "F",
                 !subst("F64", "F",
                 !subst("Pseudo", "", PseudoInst))))))))))))))))))))));
}
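
// For example, PseudoToVInst<"PseudoVADD_VV_M1_MASK">.VInst is "VADD_VV" and
// PseudoToVInst<"PseudoVFADD_VF16_M2">.VInst is "VFADD_VF", recovering the
// name of the real instruction record from a pseudo's name.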

// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
class GetVRegNoV0<VReg VRegClass> {
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
                 true : VRegClass);
}

// Join strings in a list using a separator, ignoring empty elements.
class Join<list<string> strings, string separator> {
  string ret = !foldl(!head(strings), !tail(strings), a, b,
                      !cond(
                        !and(!empty(a), !empty(b)) : "",
                        !empty(a) : b,
                        !empty(b) : a,
                        1 : a#separator#b));
}
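
// For example, Join<["@earlyclobber $rd", "$rd = $merge"], ",">.ret is
// "@earlyclobber $rd,$rd = $merge", while Join<["", "$rd = $merge"], ",">.ret
// collapses to just "$rd = $merge".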

class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
}

class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSLoadMask<VReg RetClass, int EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, GPR:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                         bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd", "");
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                       bit Ordered, bit EarlyClobber>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, IdxClass:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSStoreNoMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Unary instruction that is never masked, so HasDummyMask=0.
class VPseudoUnaryNoDummyMask<VReg RetClass,
                              DAGOperand Op2Class> :
        Pseudo<(outs RetClass:$rd),
               (ins Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoNullaryNoMask<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins AVL:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoNullaryMask<VReg RegClass>:
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
              ixlenimm:$sew), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Nullary mask pseudo instructions. They are expanded by the
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
       : Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
       RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used by the RISCVExpandPseudoInsts pass.
  // Just fill in a corresponding real v-inst to pass the tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
}

// RetClass could be GPR or VReg.
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Mask unary operation without maskedoff.
class VPseudoMaskUnarySOutMask:
        Pseudo<(outs GPR:$rd),
               (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// The mask can be any of V0-V31.
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2,
                  VR:$vm, AVL:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoTiedBinaryNoMask<VReg RetClass,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs2"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let ForceTailAgnostic = 1;
  let isConvertibleToThreeAddress = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                        bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryMask<VReg RetClass,
                        RegisterClass Op1Class,
                        DAGOperand Op2Class,
                        string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Like VPseudoBinaryMask, but output can be V0.
class VPseudoBinaryMOutMask<VReg RetClass,
                            RegisterClass Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
                (ins RetClass:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Special version of VPseudoBinaryMask where we pretend the first source is
// tied to the destination so we can work around the earlyclobber constraint.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryMask<VReg RetClass,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0; // Merge is also rs2.
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryCarryIn<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           LMULInfo MInfo,
                           bit CarryIn,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
  let VLMul = MInfo.value;
}

class VPseudoTernaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoAMOWDNoMask<VReg RetClass,
                         VReg Op1Class> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
               (ins GPR:$rs1,
                    Op1Class:$vs2,
                    GetVRegNoV0<RetClass>.R:$vd,
                    AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
  let Constraints = "$vd_wd = $vd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoAMOWDMask<VReg RetClass,
                       VReg Op1Class> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
               (ins GPR:$rs1,
                    Op1Class:$vs2,
                    GetVRegNoV0<RetClass>.R:$vd,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
  let Constraints = "$vd_wd = $vd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

multiclass VPseudoAMOEI<int eew> {
  // Standard scalar AMOs support 32, 64, and 128 bits of memory data. In the
  // base vector "V" extension, only SEW up to ELEN = max(XLEN, FLEN) is
  // required to be supported, so only [32, 64] is allowed here.
  foreach sew = [32, 64] in {
    foreach lmul = MxSet<sew>.m in {
      defvar octuple_lmul = lmul.octuple;
      // Calculate emul = eew * lmul / sew
      defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
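      // Worked example: eew=16, sew=32, lmul=V_M2 (octuple 16) gives
      // octuple_emul = (16 * 16) >> log2(32) = 8, i.e. EMUL = M1, matching
      // EMUL = (EEW / SEW) * LMUL = (16 / 32) * 2 = 1.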
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        let VLMul = lmul.value in {
          def "_WD_" # lmul.MX # "_" # emulMX : VPseudoAMOWDNoMask<lmul.vrclass, emul.vrclass>;
          def "_WD_" # lmul.MX # "_" # emulMX # "_MASK" : VPseudoAMOWDMask<lmul.vrclass, emul.vrclass>;
        }
      }
    }
  }
}

multiclass VPseudoAMO {
  foreach eew = EEWList in
  defm "EI" # eew : VPseudoAMOEI<eew>;
}

class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                            bits<4> NF, bit Ordered>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group.
  let Constraints = "@earlyclobber $rd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bits<4> NF, bit Ordered>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group.
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR:$offset,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                             bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass:$index,
                  AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                           bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass:$index,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

multiclass VPseudoUSLoad<bit isFF> {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      defvar FFStr = !if(isFF, "FF", "");
      let VLMul = lmul.value in {
        def "E" # eew # FFStr # "_V_" # LInfo :
          VPseudoUSLoadNoMask<vreg, eew, isFF>;
        def "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
          VPseudoUSLoadMask<vreg, eew, isFF>;
      }
    }
  }
}

multiclass VPseudoLoadMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>;
    }
  }
}

multiclass VPseudoSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>;
      }
    }
  }
}

multiclass VPseudoILoad<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          defvar HasConstraint = !ne(sew, eew);
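          // When the index EEW differs from the data SEW, the index and data
          // register groups have different EEWs, so the destination must be
          // earlyclobber to respect the vector spec's overlap rules.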
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>;
          }
        }
      }
    }
  }
}

multiclass VPseudoUSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>;
      }
    }
  }
}

multiclass VPseudoStoreMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
    }
  }
}

multiclass VPseudoSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>;
      }
    }
  }
}

multiclass VPseudoIStore<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
          }
        }
      }
    }
  }
}

multiclass VPseudoUnaryS_M {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>;
      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask;
    }
  }
}

multiclass VPseudoUnaryM_M {
  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>;
    }
  }
}
1452
1453multiclass VPseudoMaskNullaryV {
1454  foreach m = MxList.m in {
1455    let VLMul = m.value in {
1456      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>;
1457      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>;
1458    }
1459  }
1460}
1461
1462multiclass VPseudoNullaryPseudoM <string BaseInst> {
1463  foreach mti = AllMasks in {
1464    let VLMul = mti.LMul.value in {
1465      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
1466    }
1467  }
1468}
1469
1470multiclass VPseudoUnaryV_M {
1471  defvar constraint = "@earlyclobber $rd";
1472  foreach m = MxList.m in {
1473    let VLMul = m.value in {
1474      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>;
1475      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>;
1476    }
1477  }
1478}
1479
1480multiclass VPseudoUnaryV_V_AnyMask {
1481  foreach m = MxList.m in {
1482    let VLMul = m.value in
1483      def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>;
1484  }
1485}
1486
1487multiclass VPseudoBinary<VReg RetClass,
1488                         VReg Op1Class,
1489                         DAGOperand Op2Class,
1490                         LMULInfo MInfo,
1491                         string Constraint = ""> {
1492  let VLMul = MInfo.value in {
1493    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1494                                             Constraint>;
1495    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
1496                                                     Constraint>;
1497  }
1498}
1499
1500multiclass VPseudoBinaryM<VReg RetClass,
1501                          VReg Op1Class,
1502                          DAGOperand Op2Class,
1503                          LMULInfo MInfo,
1504                          string Constraint = ""> {
1505  let VLMul = MInfo.value in {
1506    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1507                                             Constraint>;
1508    let ForceTailAgnostic = true in
1509    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
1510                                                         Op2Class, Constraint>;
1511  }
1512}
1513
1514multiclass VPseudoBinaryEmul<VReg RetClass,
1515                             VReg Op1Class,
1516                             DAGOperand Op2Class,
1517                             LMULInfo lmul,
1518                             LMULInfo emul,
1519                             string Constraint = ""> {
1520  let VLMul = lmul.value in {
1521    def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
1522                                                            Constraint>;
1523    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
1524                                                                    Constraint>;
1525  }
1526}
1527
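// The _TIED pseudos below tie the destination register to the first (wide)
// source operand, letting the .wv widening forms reuse that register instead
// of requiring a copy (see VPseudoBinaryW_WV).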
multiclass VPseudoTiedBinary<VReg RetClass,
                             DAGOperand Op2Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
                                                          Constraint>;
    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
                                                              Constraint>;
  }
}

multiclass VPseudoBinaryV_VV<string Constraint = ""> {
  foreach m = MxList.m in
    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}

multiclass VPseudoBinaryV_VV_EEW<int eew, string Constraint = ""> {
  foreach m = MxList.m in {
    foreach sew = EEWList in {
      defvar octuple_lmul = m.octuple;
      // emul = lmul * eew / sew
      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), log2<sew>.val);
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>;
      }
    }
  }
}

multiclass VPseudoBinaryV_VX<string Constraint = ""> {
  foreach m = MxList.m in
    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

multiclass VPseudoBinaryV_VF<string Constraint = ""> {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
                                       f.fprclass, m, Constraint>;
}

multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList.m in
    defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}

multiclass VPseudoBinaryM_MM {
  foreach m = MxList.m in
    let VLMul = m.value in {
      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">;
    }
}

// We use earlyclobber here because of the following overlap rules:
// * When the destination EEW is smaller than the source EEW, the overlap is
//   legal only if it is in the lowest-numbered part of the source register
//   group. Otherwise, it is illegal.
// * When the destination EEW is greater than the source EEW and the source
//   EMUL is at least 1, the overlap is legal only if it is in the
//   highest-numbered part of the destination register group. Otherwise, it
//   is illegal.
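// For example (our illustration, not spec text): with LMUL=1, a widening
// vwadd.vv writing v2 and v3 may legally read v3 (the highest-numbered part
// of the destination group) but not v2; the @earlyclobber constraint below
// conservatively rules out both overlaps.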
multiclass VPseudoBinaryW_VV {
  foreach m = MxListW.m in
    defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

multiclass VPseudoBinaryW_VX {
  foreach m = MxListW.m in
    defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
                               "@earlyclobber $rd">;
}

multiclass VPseudoBinaryW_VF {
  foreach m = MxListW.m in
    foreach f = FPListW.fpinfo in
      defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
                                       f.fprclass, m,
                                       "@earlyclobber $rd">;
}

multiclass VPseudoBinaryW_WV {
  foreach m = MxListW.m in {
    defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
    defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
                                 "@earlyclobber $rd">;
  }
}

multiclass VPseudoBinaryW_WX {
  foreach m = MxListW.m in
    defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
}

multiclass VPseudoBinaryW_WF {
  foreach m = MxListW.m in
    foreach f = FPListW.fpinfo in
      defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
                                       f.fprclass, m>;
}

// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
// if the source and destination have LMUL<=1. This matches the overlap
// exception from the spec:
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group."
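// For example (our illustration): vnsrl.wi v0, v0, 3 with a fractional
// destination LMUL keeps the overlap in the lowest-numbered register of the
// source group, so the !if below only adds @earlyclobber for LMUL>=1.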
multiclass VPseudoBinaryV_WV {
  foreach m = MxListW.m in
    defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryV_WX {
  foreach m = MxListW.m in
    defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryV_WI {
  foreach m = MxListW.m in
    defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                             !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}

// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0, so the destination register class below excludes v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0.
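// For example (our illustration): instantiating this as
// "defm PseudoVADC : VPseudoBinaryV_VM" with the defaults yields
// PseudoVADC_VVM_MF8 through PseudoVADC_VVM_M8, each using
// GetVRegNoV0<m.vrclass>.R as its destination class.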
multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}

multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, GPR, m, CarryIn, Constraint>;
}

multiclass VPseudoBinaryV_FM {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      def "_V" # f.FX # "M_" # m.MX :
        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">;
}

multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, simm5, m, CarryIn, Constraint>;
}

multiclass VPseudoUnaryV_V_X_I_NoDummyMask {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>;
      def "_X_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, GPR>;
      def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>;
    }
  }
}

multiclass VPseudoUnaryV_F_NoDummyMask {
  foreach m = MxList.m in {
    foreach f = FPList.fpinfo in {
      let VLMul = m.value in {
        def "_" # f.FX # "_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>;
      }
    }
  }
}

multiclass VPseudoUnaryV_V {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>;
    }
  }
}

multiclass PseudoUnaryV_VF2 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF2.m in {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f2vrclass,
                                                  constraints>;
    }
  }
}

multiclass PseudoUnaryV_VF4 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF4.m in {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f4vrclass,
                                                  constraints>;
    }
  }
}

multiclass PseudoUnaryV_VF8 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF8.m in {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f8vrclass,
                                                  constraints>;
    }
  }
}

// The destination EEW is 1 since "For the purposes of register group overlap
// constraints, mask elements have EEW=1."
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW is different from the source EEW, we need to use
// @earlyclobber to avoid overlap between the destination and source registers.
// We don't need @earlyclobber for LMUL<=1 since that matches the overlap
// exception from the spec:
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group".
// With LMUL<=1 the source and destination occupy a single register, so any
// overlap is in the lowest-numbered part.
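// For example (our illustration): vmseq.vv at LMUL=2 writes a single mask
// register while reading VRM2 groups, so the !if below adds @earlyclobber
// only for m.octuple >= 16, i.e. LMUL >= 2.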
multiclass VPseudoBinaryM_VV {
  foreach m = MxList.m in
    defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryM_VX {
  foreach m = MxList.m in
    defm "_VX" :
      VPseudoBinaryM<VR, m.vrclass, GPR, m,
                     !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryM_VF {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      defm "_V" # f.FX :
        VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
                       !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryM_VI {
  foreach m = MxList.m in
    defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}

multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoBinaryV_VV<Constraint>;
  defm "" : VPseudoBinaryV_VX<Constraint>;
  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>;
}

multiclass VPseudoBinaryV_VV_VX {
  defm "" : VPseudoBinaryV_VV;
  defm "" : VPseudoBinaryV_VX;
}

multiclass VPseudoBinaryV_VV_VF {
  defm "" : VPseudoBinaryV_VV;
  defm "" : VPseudoBinaryV_VF;
}

multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
  defm "" : VPseudoBinaryV_VX;
  defm "" : VPseudoBinaryV_VI<ImmType>;
}

multiclass VPseudoBinaryW_VV_VX {
  defm "" : VPseudoBinaryW_VV;
  defm "" : VPseudoBinaryW_VX;
}

multiclass VPseudoBinaryW_VV_VF {
  defm "" : VPseudoBinaryW_VV;
  defm "" : VPseudoBinaryW_VF;
}

multiclass VPseudoBinaryW_WV_WX {
  defm "" : VPseudoBinaryW_WV;
  defm "" : VPseudoBinaryW_WX;
}

multiclass VPseudoBinaryW_WV_WF {
  defm "" : VPseudoBinaryW_WV;
  defm "" : VPseudoBinaryW_WF;
}

multiclass VPseudoBinaryV_VM_XM_IM {
  defm "" : VPseudoBinaryV_VM;
  defm "" : VPseudoBinaryV_XM;
  defm "" : VPseudoBinaryV_IM;
}

multiclass VPseudoBinaryV_VM_XM {
  defm "" : VPseudoBinaryV_VM;
  defm "" : VPseudoBinaryV_XM;
}

multiclass VPseudoBinaryM_VM_XM_IM<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
}

multiclass VPseudoBinaryM_VM_XM<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
}

multiclass VPseudoBinaryM_V_X_I<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
}

multiclass VPseudoBinaryM_V_X<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
}

multiclass VPseudoBinaryV_WV_WX_WI {
  defm "" : VPseudoBinaryV_WV;
  defm "" : VPseudoBinaryV_WX;
  defm "" : VPseudoBinaryV_WI;
}

multiclass VPseudoTernary<VReg RetClass,
                          RegisterClass Op1Class,
                          DAGOperand Op2Class,
                          LMULInfo MInfo,
                          string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
  }
}

multiclass VPseudoTernaryV_VV<string Constraint = ""> {
  foreach m = MxList.m in {
    defm _VV : VPseudoTernary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;

    // Add a commutable version for use by IR mul+add.
    let isCommutable = 1, ForceTailAgnostic = true, VLMul = m.value in
    def "_VV_" # m.MX # "_COMMUTABLE" : VPseudoTernaryNoMask<m.vrclass,
                                                             m.vrclass,
                                                             m.vrclass,
                                                             Constraint>;
  }
}

multiclass VPseudoTernaryV_VX<string Constraint = ""> {
  foreach m = MxList.m in
    defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
  foreach m = MxList.m in {
    defm "_VX" : VPseudoTernary<m.vrclass, GPR, m.vrclass, m, Constraint>;

    // Add a commutable version for use by IR mul+add.
    let isCommutable = 1, ForceTailAgnostic = true, VLMul = m.value in
    def "_VX_" # m.MX # "_COMMUTABLE" :
       VPseudoTernaryNoMask<m.vrclass, GPR, m.vrclass, Constraint>;
  }
}

multiclass VPseudoTernaryV_VF_AAXA<string Constraint = ""> {
  foreach m = MxList.m in {
    foreach f = FPList.fpinfo in {
      defm "_V" # f.FX : VPseudoTernary<m.vrclass, f.fprclass, m.vrclass,
                                        m, Constraint>;

      // Add a commutable version for use by IR mul+add.
      let isCommutable = 1, ForceTailAgnostic = true, VLMul = m.value in
      def "_V" # f.FX # "_" # m.MX # "_COMMUTABLE" :
         VPseudoTernaryNoMask<m.vrclass, f.fprclass, m.vrclass, Constraint>;
    }
  }
}

multiclass VPseudoTernaryW_VV {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW.m in {
    defm _VV : VPseudoTernary<m.wvrclass, m.vrclass, m.vrclass, m, constraint>;

    // Add a tail agnostic version for use by IR mul+add.
    let ForceTailAgnostic = true, VLMul = m.value in
    def "_VV_" # m.MX # "_TA" : VPseudoTernaryNoMask<m.wvrclass,
                                                     m.vrclass,
                                                     m.vrclass,
                                                     constraint>;
  }
}

multiclass VPseudoTernaryW_VX {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW.m in {
    defm "_VX" : VPseudoTernary<m.wvrclass, GPR, m.vrclass, m, constraint>;

    // Add a tail agnostic version for use by IR mul+add.
    let ForceTailAgnostic = true, VLMul = m.value in
    def "_VX_" # m.MX # "_TA" :
       VPseudoTernaryNoMask<m.wvrclass, GPR, m.vrclass, constraint>;
  }
}

multiclass VPseudoTernaryW_VF {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW.m in
    foreach f = FPListW.fpinfo in {
      defm "_V" # f.FX : VPseudoTernary<m.wvrclass, f.fprclass, m.vrclass, m,
                                        constraint>;

      // Add a tail agnostic version for use by IR mul+add.
      let ForceTailAgnostic = true, VLMul = m.value in
      def "_V" # f.FX # "_" # m.MX # "_TA" :
         VPseudoTernaryNoMask<m.wvrclass, f.fprclass, m.vrclass, constraint>;
    }
}

multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList.m in
    defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}

multiclass VPseudoTernaryV_VV_VX_AAXA<string Constraint = ""> {
  defm "" : VPseudoTernaryV_VV<Constraint>;
  defm "" : VPseudoTernaryV_VX_AAXA<Constraint>;
}

multiclass VPseudoTernaryV_VV_VF_AAXA<string Constraint = ""> {
  defm "" : VPseudoTernaryV_VV<Constraint>;
  defm "" : VPseudoTernaryV_VF_AAXA<Constraint>;
}

multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoTernaryV_VX<Constraint>;
  defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
}

multiclass VPseudoTernaryW_VV_VX {
  defm "" : VPseudoTernaryW_VV;
  defm "" : VPseudoTernaryW_VX;
}

multiclass VPseudoTernaryW_VV_VF {
  defm "" : VPseudoTernaryW_VV;
  defm "" : VPseudoTernaryW_VF;
}

multiclass VPseudoBinaryM_VV_VX_VI {
  defm "" : VPseudoBinaryM_VV;
  defm "" : VPseudoBinaryM_VX;
  defm "" : VPseudoBinaryM_VI;
}

multiclass VPseudoBinaryM_VV_VX {
  defm "" : VPseudoBinaryM_VV;
  defm "" : VPseudoBinaryM_VX;
}

multiclass VPseudoBinaryM_VV_VF {
  defm "" : VPseudoBinaryM_VV;
  defm "" : VPseudoBinaryM_VF;
}

multiclass VPseudoBinaryM_VX_VI {
  defm "" : VPseudoBinaryM_VX;
  defm "" : VPseudoBinaryM_VI;
}

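// Reductions produce a single element, so both the result and the scalar
// operand use the single-register class (V_M1.vrclass) regardless of the
// vector operand's LMUL; e.g. (our illustration)
// "defm PseudoVREDSUM : VPseudoReductionV_VS" yields PseudoVREDSUM_VS_M8
// among others.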
multiclass VPseudoReductionV_VS {
  foreach m = MxList.m in {
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>;
  }
}

multiclass VPseudoConversion<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
                                                    Constraint>;
  }
}

multiclass VPseudoConversionV_V {
  foreach m = MxList.m in
    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>;
}

multiclass VPseudoConversionW_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW.m in
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>;
}

multiclass VPseudoConversionV_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW.m in
    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>;
}

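// Segment loads and stores take an NF (number of fields) in addition to EEW
// and LMUL; e.g. (our illustration) nf=2, eew=32, LMUL=M1 produces
// PseudoVLSEG2E32_V_M1, plus PseudoVLSEG2E32FF_V_M1 when isFF is set.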
multiclass VPseudoUSSegLoad<bit isFF> {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          defvar FFStr = !if(isFF, "FF", "");
          def nf # "E" # eew # FFStr # "_V_" # LInfo :
            VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>;
          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadMask<vreg, eew, nf, isFF>;
        }
      }
    }
  }
}

multiclass VPseudoSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>;
        }
      }
    }
  }
}

multiclass VPseudoISegLoad<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = val_lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                      nf, Ordered>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                    nf, Ordered>;
            }
          }
        }
      }
    }
  }
}

multiclass VPseudoUSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>;
        }
      }
    }
  }
}

multiclass VPseudoSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>;
        }
      }
    }
  }
}

multiclass VPseudoISegStore<bit Ordered> {
  foreach idx_eew = EEWList in {
    foreach sew = EEWList in {
      foreach val_lmul = MxSet<sew>.m in {
        defvar octuple_lmul = val_lmul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar ValLInfo = val_lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = val_lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = val_lmul.value in {
            foreach nf = NFSet<val_lmul>.L in {
              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                       nf, Ordered>;
              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                     nf, Ordered>;
            }
          }
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Helpers to define the intrinsic patterns.
//===----------------------------------------------------------------------===//

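// These classes map a call to a target intrinsic onto one of the pseudos
// defined above. As an illustration (not a def from this file), a
// VPatBinaryNoMask instance rewrites
//   (int_riscv_vadd vs1, vs2, vl)
// into
//   (PseudoVADD_VV_M1 vs1, vs2, GPR:$vl, sew)
// for the matching type and LMUL combination.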
class VPatUnaryNoMask<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew)>;

class VPatUnaryMask<string intrinsic_name,
                    string inst,
                    string kind,
                    ValueType result_type,
                    ValueType op2_type,
                    ValueType mask_type,
                    int sew,
                    LMULInfo vlmul,
                    VReg result_reg_class,
                    VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

class VPatMaskUnaryNoMask<string intrinsic_name,
                          string inst,
                          MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                (mti.Mask VR:$rs2),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX)
                (mti.Mask VR:$rs2),
                GPR:$vl, mti.Log2SEW)>;

class VPatMaskUnaryMask<string intrinsic_name,
                        string inst,
                        MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

class VPatUnaryAnyMask<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   GPR:$vl, sew)>;

class VPatBinaryNoMask<string intrinsic_name,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       int sew,
                       VReg op1_reg_class,
                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

// Same as above but source operands are swapped.
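// (This is how comparisons with no direct instruction are selected; e.g.,
// our illustration: vmfgt can be matched by the vmflt pattern with its
// operands exchanged.)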
class VPatBinaryNoMaskSwapped<string intrinsic_name,
                              string inst,
                              ValueType result_type,
                              ValueType op1_type,
                              ValueType op2_type,
                              int sew,
                              VReg op1_reg_class,
                              DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

class VPatBinaryMask<string intrinsic_name,
                     string inst,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int sew,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

// Same as above but source operands are swapped.
class VPatBinaryMaskSwapped<string intrinsic_name,
                            string inst,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            VReg result_reg_class,
                            VReg op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

class VPatTiedBinaryNoMask<string intrinsic_name,
                           string inst,
                           ValueType result_type,
                           ValueType op2_type,
                           int sew,
                           VReg result_reg_class,
                           DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;

class VPatTiedBinaryMask<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int sew,
                         VReg result_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;

class VPatTernaryNoMask<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew)>;

class VPatTernaryMask<string intrinsic,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      RegisterClass op1_reg_class,
                      DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew)>;

class VPatAMOWDNoMask<string intrinsic_name,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      int sew,
                      LMULInfo vlmul,
                      LMULInfo emul,
                      VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    GPR:$rs1,
                    (op1_type op1_reg_class:$vs2),
                    (result_type vlmul.vrclass:$vd),
                    VLOpFrag)),
                   (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX)
                    $rs1, $vs2, $vd,
                    GPR:$vl, sew)>;

class VPatAMOWDMask<string intrinsic_name,
                    string inst,
                    ValueType result_type,
                    ValueType op1_type,
                    ValueType mask_type,
                    int sew,
                    LMULInfo vlmul,
                    LMULInfo emul,
                    VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name # "_mask")
                    GPR:$rs1,
                    (op1_type op1_reg_class:$vs2),
                    (result_type vlmul.vrclass:$vd),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK")
                    $rs1, $vs2, $vd,
                    (mask_type V0), GPR:$vl, sew)>;

multiclass VPatUnaryS_M<string intrinsic_name,
                        string inst> {
  foreach mti = AllMasks in {
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
                      (mti.Mask VR:$rs1), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
                      GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
                      (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}

multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
                           vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass,
                           vti.RegClass>;
  }
}

multiclass VPatUnaryM_M<string intrinsic,
                        string inst> {
  foreach mti = AllMasks in {
    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
  }
}

multiclass VPatUnaryV_M<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in {
    def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, VR>;
    def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                        vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
  }
}

multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
                         list<VTypeInfoToFraction> fractionList> {
  foreach vtiTofti = fractionList in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    def : VPatUnaryNoMask<intrinsic, instruction, suffix,
                          vti.Vector, fti.Vector,
                          vti.Log2SEW, vti.LMul, fti.RegClass>;
    def : VPatUnaryMask<intrinsic, instruction, suffix,
                        vti.Vector, fti.Vector, vti.Mask,
                        vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
  }
}

multiclass VPatUnaryV_V<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryNoMask<intrinsic, instruction, "V",
                          vti.Vector, vti.Vector,
                          vti.Log2SEW, vti.LMul, vti.RegClass>;
    def : VPatUnaryMask<intrinsic, instruction, "V",
                        vti.Vector, vti.Vector, vti.Mask,
                        vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
  }
}

multiclass VPatNullaryV<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in {
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                          VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                          GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                          (vti.Vector vti.RegClass:$merge),
                          (vti.Mask V0), VLOpFrag)),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                          vti.RegClass:$merge, (vti.Mask V0),
                          GPR:$vl, vti.Log2SEW)>;
  }
}

multiclass VPatNullaryM<string intrinsic, string inst> {
  foreach mti = AllMasks in
    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
                        (XLenVT (VLOp (XLenVT GPR:$vl))))),
                        (!cast<Instruction>(inst#"_M_"#mti.BX)
                        GPR:$vl, mti.Log2SEW)>;
}

multiclass VPatBinary<string intrinsic,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      VReg result_reg_class,
                      VReg op1_reg_class,
                      DAGOperand op2_kind> {
  def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
                         sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
                       mask_type, sew, result_reg_class, op1_reg_class,
                       op2_kind>;
}

multiclass VPatBinarySwapped<string intrinsic,
                             string inst,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             VReg result_reg_class,
                             VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                                sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                              mask_type, sew, result_reg_class, op1_reg_class,
                              op2_kind>;
}

multiclass VPatBinaryCarryIn<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}

multiclass VPatBinaryMaskOut<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         GPR:$vl, sew)>;
}

multiclass VPatConversion<string intrinsic,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op1_type,
                          ValueType mask_type,
                          int sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op1_reg_class> {
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                        sew, vlmul, op1_reg_class>;
  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type,
                      mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
}

multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar ivti = GetIntVTypeInfo<vti>.Vti;
    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.RegClass>;
  }
}

multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
                                  int eew, list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // emul = lmul * eew / sew
    defvar vlmul = vti.LMul;
    defvar octuple_lmul = vlmul.octuple;
    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
      defvar emul_str = octuple_to_str<octuple_emul>.ret;
      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
      defm : VPatBinary<intrinsic, inst,
                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, ivti.RegClass>;
    }
  }
}

multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, GPR>;
}

multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, Operand imm_type> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, imm_type>;
}

multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
  foreach mti = AllMasks in
    def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX,
                           mti.Mask, mti.Mask, mti.Mask,
                           mti.Log2SEW, VR, VR>;
}

multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, Vti.RegClass>;
  }
}

multiclass VPatBinaryW_VX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, Vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryW_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Vti.Vector,
                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
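    // Prefer the tied masked pattern over the generic one below when both
    // match, i.e. when the merge operand equals the wide source.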
2764    let AddedComplexity = 1 in
2765    def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
2766                             Wti.Vector, Vti.Vector, Vti.Mask,
2767                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
2768    def : VPatBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
2769                         Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
2770                         Vti.Log2SEW, Wti.RegClass,
2771                         Wti.RegClass, Vti.RegClass>;
2772  }
2773}
2774
2775multiclass VPatBinaryW_WX<string intrinsic, string instruction,
2776                          list<VTypeInfoToWide> vtilist> {
2777  foreach VtiToWti = vtilist in {
2778    defvar Vti = VtiToWti.Vti;
2779    defvar Wti = VtiToWti.Wti;
2780    defvar kind = "W"#Vti.ScalarSuffix;
2781    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
2782                      Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
2783                      Vti.Log2SEW, Wti.RegClass,
2784                      Wti.RegClass, Vti.ScalarRegClass>;
2785  }
2786}
2787
2788multiclass VPatBinaryV_WV<string intrinsic, string instruction,
2789                          list<VTypeInfoToWide> vtilist> {
2790  foreach VtiToWti = vtilist in {
2791    defvar Vti = VtiToWti.Vti;
2792    defvar Wti = VtiToWti.Wti;
2793    defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
2794                      Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
2795                      Vti.Log2SEW, Vti.RegClass,
2796                      Wti.RegClass, Vti.RegClass>;
2797  }
2798}
2799
2800multiclass VPatBinaryV_WX<string intrinsic, string instruction,
2801                          list<VTypeInfoToWide> vtilist> {
2802  foreach VtiToWti = vtilist in {
2803    defvar Vti = VtiToWti.Vti;
2804    defvar Wti = VtiToWti.Wti;
2805    defvar kind = "W"#Vti.ScalarSuffix;
2806    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
2807                      Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
2808                      Vti.Log2SEW, Vti.RegClass,
2809                      Wti.RegClass, Vti.ScalarRegClass>;
2810  }
2811}
2812
2813multiclass VPatBinaryV_WI<string intrinsic, string instruction,
2814                          list<VTypeInfoToWide> vtilist> {
2815  foreach VtiToWti = vtilist in {
2816    defvar Vti = VtiToWti.Vti;
2817    defvar Wti = VtiToWti.Wti;
2818    defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
2819                      Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
2820                      Vti.Log2SEW, Vti.RegClass,
2821                      Wti.RegClass, uimm5>;
2822  }
2823}
2824
multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_XM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction,
                             "V"#vti.ScalarSuffix#"M",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Scalar, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.ScalarRegClass>;
}

multiclass VPatBinaryV_IM<string intrinsic, string instruction,
                          bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, XLenVT, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}

multiclass VPatBinaryV_V<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
                             vti.Mask, vti.Vector, vti.Vector,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_X<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, GPR>;
}

multiclass VPatBinaryV_I<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}

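// The mask-producing pattern classes below always use VR as the result
// register class: a mask value occupies a single vector register regardless
// of the LMUL of its source operands.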
multiclass VPatBinaryM_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                      vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                      vti.Log2SEW, VR,
                      vti.RegClass, vti.RegClass>;
}

multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, VR,
                             vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryM_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                      vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, VR,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryM_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                      vti.Mask, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, VR,
                      vti.RegClass, simm5>;
}

multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;

multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
      VPatBinaryW_WX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction>,
      VPatBinaryV_XM<intrinsic, instruction>,
      VPatBinaryV_IM<intrinsic, instruction>;

multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>;

multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>,
      VPatBinaryV_I<intrinsic, instruction>;

multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction>,
      VPatBinaryV_XM<intrinsic, instruction>;

multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>,
      VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;

multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>;

multiclass VPatTernary<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       RegisterClass op1_reg_class,
                       DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                          op2_type, mask_type, sew, vlmul, result_reg_class,
                          op1_reg_class, op2_kind>;
  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                        op2_kind>;
}

multiclass VPatTernaryV_VV<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VV",
                       vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                       vti.Log2SEW, vti.LMul, vti.RegClass,
                       vti.RegClass, vti.RegClass>;
}

multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VX",
                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
                       vti.Log2SEW, vti.LMul, vti.RegClass,
                       vti.RegClass, GPR>;
}

multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction,
                       "V"#vti.ScalarSuffix,
                       vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                       vti.Log2SEW, vti.LMul, vti.RegClass,
                       vti.ScalarRegClass, vti.RegClass>;
}

multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand Imm_type> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VI",
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.LMul, vti.RegClass,
                      vti.RegClass, Imm_type>;
}

multiclass VPatTernaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernary<intrinsic, instruction, "VV",
                      wti.Vector, vti.Vector, vti.Vector,
                      vti.Mask, vti.Log2SEW, vti.LMul,
                      wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}

multiclass VPatTernaryW_VX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernary<intrinsic, instruction,
                       "V"#vti.ScalarSuffix,
                       wti.Vector, vti.Scalar, vti.Vector,
                       vti.Mask, vti.Log2SEW, vti.LMul,
                       wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
  }
}

multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
    : VPatTernaryV_VV<intrinsic, instruction, vtilist>,
      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;

multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist, Operand Imm_type = simm5>
    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;

multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist>
    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
      VPatTernaryW_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>;

multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;

multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in
  {
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
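    // e.g. with IsFloat = 0 and SEW = 32 this casts the record named "VI32M1".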
    defm : VPatTernary<intrinsic, instruction, "VS",
                       vectorM1.Vector, vti.Vector,
                       vectorM1.Vector, vti.Mask,
                       vti.Log2SEW, vti.LMul,
                       VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
  {
    defm : VPatTernary<intrinsic, instruction, "VS",
                       gvti.VectorM1, gvti.Vector,
                       gvti.VectorM1, gvti.Mask,
                       gvti.Log2SEW, gvti.LMul,
                       VR, gvti.RegClass, VR>;
  }
}

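// Widening reductions accumulate into an LMUL=1 vector whose elements are
// 2*SEW wide, so SEW=64 sources are skipped below: there is no SEW=128 type.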
multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in
  {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      defm : VPatTernary<intrinsic, instruction, "VS",
                         wtiM1.Vector, vti.Vector,
                         wtiM1.Vector, vti.Mask,
                         vti.Log2SEW, vti.LMul,
                         wtiM1.RegClass, vti.RegClass,
                         wtiM1.RegClass>;
    }
  }
}

multiclass VPatConversionVI_VF<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                          fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}

multiclass VPatConversionVF_VI<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
                          ivti.LMul, fvti.RegClass, ivti.RegClass>;
  }
}

multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                          fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}

multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in
  {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
                          vti.LMul, fwti.RegClass, vti.RegClass>;
  }
}

multiclass VPatConversionWF_VF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
                          fvti.LMul, fwti.RegClass, fvti.RegClass>;
  }
}

multiclass VPatConversionVI_WF <string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in
  {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "W",
                          vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                          vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}

multiclass VPatConversionVF_WI <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "W",
                          fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
                          fvti.LMul, fvti.RegClass, iwti.RegClass>;
  }
}

multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "W",
                          fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                          fvti.LMul, fvti.RegClass, fwti.RegClass>;
  }
}

multiclass VPatAMOWD<string intrinsic,
                     string inst,
                     ValueType result_type,
                     ValueType offset_type,
                     ValueType mask_type,
                     int sew,
                     LMULInfo vlmul,
                     LMULInfo emul,
                     VReg op1_reg_class>
{
  def : VPatAMOWDNoMask<intrinsic, inst, result_type, offset_type,
                        sew, vlmul, emul, op1_reg_class>;
  def : VPatAMOWDMask<intrinsic, inst, result_type, offset_type,
                      mask_type, sew, vlmul, emul, op1_reg_class>;
}

multiclass VPatAMOV_WD<string intrinsic,
                       string inst,
                       list<VTypeInfo> vtilist> {
  foreach eew = EEWList in {
    foreach vti = vtilist in {
      if !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)) then {
        defvar octuple_lmul = vti.LMul.octuple;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), vti.Log2SEW);
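        // e.g. eew=16, LMUL=2 (octuple=16), SEW=32 (Log2SEW=5):
        // octuple_emul = (16*16) >> 5 = 8, i.e. EMUL=1.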
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar emulMX = octuple_to_str<octuple_emul>.ret;
          defvar offsetVti = !cast<VTypeInfo>("VI" # eew # emulMX);
          defvar inst_ei = inst # "EI" # eew;
          defm : VPatAMOWD<intrinsic, inst_ei,
                           vti.Vector, offsetVti.Vector,
                           vti.Mask, vti.Log2SEW, vti.LMul, offsetVti.LMul,
                           offsetVti.RegClass>;
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {

//===----------------------------------------------------------------------===//
// Pseudo Instructions for CodeGen
//===----------------------------------------------------------------------===//
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
  def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>;
  def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>;
  def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>;
  def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>;
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
                               [(set GPR:$rd, (riscv_read_vlenb))]>;
}

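// Reads the current value of VL, e.g. to recover the element count after a
// fault-only-first load has trimmed it.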
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
    Uses = [VL] in
def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
  def PseudoVSPILL_M1 : VPseudo<VS1R_V, V_M1, (outs), (ins VR:$rs1, GPR:$rs2)>;
  def PseudoVSPILL_M2 : VPseudo<VS2R_V, V_M2, (outs), (ins VRM2:$rs1, GPR:$rs2)>;
  def PseudoVSPILL_M4 : VPseudo<VS4R_V, V_M4, (outs), (ins VRM4:$rs1, GPR:$rs2)>;
  def PseudoVSPILL_M8 : VPseudo<VS8R_V, V_M8, (outs), (ins VRM8:$rs1, GPR:$rs2)>;
}

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoVRELOAD_M1 : VPseudo<VL1RE8_V, V_M1, (outs VR:$rs1), (ins GPR:$rs2)>;
  def PseudoVRELOAD_M2 : VPseudo<VL2RE8_V, V_M2, (outs VRM2:$rs1), (ins GPR:$rs2)>;
  def PseudoVRELOAD_M4 : VPseudo<VL4RE8_V, V_M4, (outs VRM4:$rs1), (ins GPR:$rs2)>;
  def PseudoVRELOAD_M8 : VPseudo<VL8RE8_V, V_M8, (outs VRM8:$rs1), (ins GPR:$rs2)>;
}

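// Segment spill/reload pseudos additionally take VLENB in $vlenb so the
// expansion can step between the NF register slices of the tuple.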
foreach lmul = MxList.m in {
  foreach nf = NFSet<lmul>.L in {
    defvar vreg = SegRegClass<lmul, nf>.RC;
    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
      def "PseudoVSPILL" # nf # "_" # lmul.MX :
        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2, GPR:$vlenb), []>;
    }
    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in {
      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2, GPR:$vlenb), []>;
    }
  }
}

//===----------------------------------------------------------------------===//
// 6. Configuration-Setting Instructions
//===----------------------------------------------------------------------===//

// Pseudos.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>;
def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp:$vtypei), []>;
}

//===----------------------------------------------------------------------===//
// 7. Vector Loads and Stores
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 7.4 Vector Unit-Stride Instructions
//===----------------------------------------------------------------------===//

// Pseudos for unit-stride loads and stores
defm PseudoVL : VPseudoUSLoad</*isFF=*/false>;
defm PseudoVS : VPseudoUSStore;

defm PseudoVLE1 : VPseudoLoadMask;
defm PseudoVSE1 : VPseudoStoreMask;

//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//

// Vector Strided Loads and Stores
defm PseudoVLS : VPseudoSLoad;
defm PseudoVSS : VPseudoSStore;

//===----------------------------------------------------------------------===//
// 7.6 Vector Indexed Instructions
//===----------------------------------------------------------------------===//

// Vector Indexed Loads and Stores
defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>;
defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>;
defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>;
defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>;

//===----------------------------------------------------------------------===//
// 7.7. Unit-stride Fault-Only-First Loads
//===----------------------------------------------------------------------===//

// vleff may update the VL register
let hasSideEffects = 1, Defs = [VL] in
defm PseudoVL : VPseudoUSLoad</*isFF=*/true>;

//===----------------------------------------------------------------------===//
// 7.8. Vector Load/Store Segment Instructions
//===----------------------------------------------------------------------===//
defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/false>;
defm PseudoVLSSEG : VPseudoSSegLoad;
defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>;
defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>;
defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;

// vlseg<nf>e<eew>ff.v may update the VL register
let hasSideEffects = 1, Defs = [VL] in
defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/true>;

//===----------------------------------------------------------------------===//
// 8. Vector AMO Operations
//===----------------------------------------------------------------------===//
defm PseudoVAMOSWAP : VPseudoAMO;
defm PseudoVAMOADD : VPseudoAMO;
defm PseudoVAMOXOR : VPseudoAMO;
defm PseudoVAMOAND : VPseudoAMO;
defm PseudoVAMOOR : VPseudoAMO;
defm PseudoVAMOMIN : VPseudoAMO;
defm PseudoVAMOMAX : VPseudoAMO;
defm PseudoVAMOMINU : VPseudoAMO;
defm PseudoVAMOMAXU : VPseudoAMO;

//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 12.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
defm PseudoVADD        : VPseudoBinaryV_VV_VX_VI;
defm PseudoVSUB        : VPseudoBinaryV_VV_VX;
defm PseudoVRSUB       : VPseudoBinaryV_VX_VI;

foreach vti = AllIntegerVectors in {
  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
  // occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
  // to use a more complex splat sequence. Add the pattern for all VTs for
  // consistency.
  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$rs2),
                                         (vti.Vector vti.RegClass:$rs1),
                                         VLOpFrag)),
            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                              vti.RegClass:$rs2,
                                                              GPR:$vl,
                                                              vti.Log2SEW)>;
  def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
                                              (vti.Vector vti.RegClass:$rs2),
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
                                                      vti.RegClass:$merge,
                                                      vti.RegClass:$rs1,
                                                      vti.RegClass:$rs2,
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  // Match VSUB with a small immediate to vadd.vi by negating the immediate.
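  // e.g. vsub with immediate 5 becomes vadd.vi with immediate -5; the
  // immediate must lie in [-15, 16] (simm5_plus1) so its negation still
  // fits in simm5.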
  def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        VLOpFrag)),
            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                              (NegImm simm5_plus1:$rs2),
                                                              GPR:$vl,
                                                              vti.Log2SEW)>;
  def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar simm5_plus1:$rs2),
                                             (vti.Mask V0),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
                                                      vti.RegClass:$merge,
                                                      vti.RegClass:$rs1,
                                                      (NegImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;
}

//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
defm PseudoVWADDU    : VPseudoBinaryW_VV_VX;
defm PseudoVWSUBU    : VPseudoBinaryW_VV_VX;
defm PseudoVWADD     : VPseudoBinaryW_VV_VX;
defm PseudoVWSUB     : VPseudoBinaryW_VV_VX;
defm PseudoVWADDU    : VPseudoBinaryW_WV_WX;
defm PseudoVWSUBU    : VPseudoBinaryW_WV_WX;
defm PseudoVWADD     : VPseudoBinaryW_WV_WX;
defm PseudoVWSUB     : VPseudoBinaryW_WV_WX;

//===----------------------------------------------------------------------===//
// 12.3. Vector Integer Extension
//===----------------------------------------------------------------------===//
defm PseudoVZEXT_VF2 : PseudoUnaryV_VF2;
defm PseudoVZEXT_VF4 : PseudoUnaryV_VF4;
defm PseudoVZEXT_VF8 : PseudoUnaryV_VF8;
defm PseudoVSEXT_VF2 : PseudoUnaryV_VF2;
defm PseudoVSEXT_VF4 : PseudoUnaryV_VF4;
defm PseudoVSEXT_VF8 : PseudoUnaryV_VF8;

//===----------------------------------------------------------------------===//
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
//===----------------------------------------------------------------------===//
defm PseudoVADC        : VPseudoBinaryV_VM_XM_IM;
defm PseudoVMADC       : VPseudoBinaryM_VM_XM_IM<"@earlyclobber $rd">;
defm PseudoVMADC       : VPseudoBinaryM_V_X_I<"@earlyclobber $rd">;

defm PseudoVSBC        : VPseudoBinaryV_VM_XM;
defm PseudoVMSBC       : VPseudoBinaryM_VM_XM<"@earlyclobber $rd">;
defm PseudoVMSBC       : VPseudoBinaryM_V_X<"@earlyclobber $rd">;

//===----------------------------------------------------------------------===//
// 12.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
defm PseudoVAND        : VPseudoBinaryV_VV_VX_VI;
defm PseudoVOR         : VPseudoBinaryV_VV_VX_VI;
defm PseudoVXOR        : VPseudoBinaryV_VV_VX_VI;

//===----------------------------------------------------------------------===//
// 12.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
defm PseudoVSLL        : VPseudoBinaryV_VV_VX_VI<uimm5>;
defm PseudoVSRL        : VPseudoBinaryV_VV_VX_VI<uimm5>;
defm PseudoVSRA        : VPseudoBinaryV_VV_VX_VI<uimm5>;

//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
defm PseudoVNSRL       : VPseudoBinaryV_WV_WX_WI;
defm PseudoVNSRA       : VPseudoBinaryV_WV_WX_WI;

//===----------------------------------------------------------------------===//
// 12.8. Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMSEQ       : VPseudoBinaryM_VV_VX_VI;
defm PseudoVMSNE       : VPseudoBinaryM_VV_VX_VI;
defm PseudoVMSLTU      : VPseudoBinaryM_VV_VX;
defm PseudoVMSLT       : VPseudoBinaryM_VV_VX;
defm PseudoVMSLEU      : VPseudoBinaryM_VV_VX_VI;
defm PseudoVMSLE       : VPseudoBinaryM_VV_VX_VI;
defm PseudoVMSGTU      : VPseudoBinaryM_VX_VI;
defm PseudoVMSGT       : VPseudoBinaryM_VX_VI;

//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMINU       : VPseudoBinaryV_VV_VX;
defm PseudoVMIN        : VPseudoBinaryV_VV_VX;
defm PseudoVMAXU       : VPseudoBinaryV_VV_VX;
defm PseudoVMAX        : VPseudoBinaryV_VV_VX;

//===----------------------------------------------------------------------===//
// 12.10. Vector Single-Width Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMUL        : VPseudoBinaryV_VV_VX;
defm PseudoVMULH       : VPseudoBinaryV_VV_VX;
defm PseudoVMULHU      : VPseudoBinaryV_VV_VX;
defm PseudoVMULHSU     : VPseudoBinaryV_VV_VX;

//===----------------------------------------------------------------------===//
// 12.11. Vector Integer Divide Instructions
//===----------------------------------------------------------------------===//
defm PseudoVDIVU       : VPseudoBinaryV_VV_VX;
defm PseudoVDIV        : VPseudoBinaryV_VV_VX;
defm PseudoVREMU       : VPseudoBinaryV_VV_VX;
defm PseudoVREM        : VPseudoBinaryV_VV_VX;

//===----------------------------------------------------------------------===//
// 12.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm PseudoVWMUL       : VPseudoBinaryW_VV_VX;
defm PseudoVWMULU      : VPseudoBinaryW_VV_VX;
defm PseudoVWMULSU     : VPseudoBinaryW_VV_VX;

//===----------------------------------------------------------------------===//
// 12.13. Vector Single-Width Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMACC       : VPseudoTernaryV_VV_VX_AAXA;
defm PseudoVNMSAC      : VPseudoTernaryV_VV_VX_AAXA;
defm PseudoVMADD       : VPseudoTernaryV_VV_VX_AAXA;
defm PseudoVNMSUB      : VPseudoTernaryV_VV_VX_AAXA;

//===----------------------------------------------------------------------===//
// 12.14. Vector Widening Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVWMACCU     : VPseudoTernaryW_VV_VX;
defm PseudoVWMACC      : VPseudoTernaryW_VV_VX;
defm PseudoVWMACCSU    : VPseudoTernaryW_VV_VX;
defm PseudoVWMACCUS    : VPseudoTernaryW_VX;

//===----------------------------------------------------------------------===//
// 12.15. Vector Integer Merge Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMERGE      : VPseudoBinaryV_VM_XM_IM;

//===----------------------------------------------------------------------===//
// 12.16. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMV_V       : VPseudoUnaryV_V_X_I_NoDummyMask;

//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
let Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVSADDU      : VPseudoBinaryV_VV_VX_VI;
  defm PseudoVSADD       : VPseudoBinaryV_VV_VX_VI;
  defm PseudoVSSUBU      : VPseudoBinaryV_VV_VX;
  defm PseudoVSSUB       : VPseudoBinaryV_VV_VX;
}

//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Averaging Add and Subtract
//===----------------------------------------------------------------------===//
let Uses = [VXRM], hasSideEffects = 1 in {
  defm PseudoVAADDU      : VPseudoBinaryV_VV_VX;
  defm PseudoVAADD       : VPseudoBinaryV_VV_VX;
  defm PseudoVASUBU      : VPseudoBinaryV_VV_VX;
  defm PseudoVASUB       : VPseudoBinaryV_VV_VX;
}

//===----------------------------------------------------------------------===//
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVSMUL      : VPseudoBinaryV_VV_VX;
}

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
let Uses = [VXRM], hasSideEffects = 1 in {
  defm PseudoVSSRL        : VPseudoBinaryV_VV_VX_VI<uimm5>;
  defm PseudoVSSRA        : VPseudoBinaryV_VV_VX_VI<uimm5>;
}

//===----------------------------------------------------------------------===//
// 13.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVNCLIP     : VPseudoBinaryV_WV_WX_WI;
  defm PseudoVNCLIPU    : VPseudoBinaryV_WV_WX_WI;
}

} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
//===----------------------------------------------------------------------===//
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFADD       : VPseudoBinaryV_VV_VF;
defm PseudoVFSUB       : VPseudoBinaryV_VV_VF;
defm PseudoVFRSUB      : VPseudoBinaryV_VF;

//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFWADD     : VPseudoBinaryW_VV_VF;
defm PseudoVFWSUB     : VPseudoBinaryW_VV_VF;
defm PseudoVFWADD     : VPseudoBinaryW_WV_WF;
defm PseudoVFWSUB     : VPseudoBinaryW_WV_WF;

//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFMUL       : VPseudoBinaryV_VV_VF;
defm PseudoVFDIV       : VPseudoBinaryV_VV_VF;
defm PseudoVFRDIV      : VPseudoBinaryV_VF;

//===----------------------------------------------------------------------===//
// 14.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
defm PseudoVFWMUL      : VPseudoBinaryW_VV_VF;

//===----------------------------------------------------------------------===//
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFMACC      : VPseudoTernaryV_VV_VF_AAXA;
defm PseudoVFNMACC     : VPseudoTernaryV_VV_VF_AAXA;
defm PseudoVFMSAC      : VPseudoTernaryV_VV_VF_AAXA;
defm PseudoVFNMSAC     : VPseudoTernaryV_VV_VF_AAXA;
defm PseudoVFMADD      : VPseudoTernaryV_VV_VF_AAXA;
defm PseudoVFNMADD     : VPseudoTernaryV_VV_VF_AAXA;
defm PseudoVFMSUB      : VPseudoTernaryV_VV_VF_AAXA;
defm PseudoVFNMSUB     : VPseudoTernaryV_VV_VF_AAXA;

//===----------------------------------------------------------------------===//
// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFWMACC     : VPseudoTernaryW_VV_VF;
defm PseudoVFWNMACC    : VPseudoTernaryW_VV_VF;
defm PseudoVFWMSAC     : VPseudoTernaryW_VV_VF;
defm PseudoVFWNMSAC    : VPseudoTernaryW_VV_VF;

//===----------------------------------------------------------------------===//
// 14.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFSQRT      : VPseudoUnaryV_V;

//===----------------------------------------------------------------------===//
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFRSQRT7    : VPseudoUnaryV_V;

//===----------------------------------------------------------------------===//
// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFREC7      : VPseudoUnaryV_V;

//===----------------------------------------------------------------------===//
// 14.11. Vector Floating-Point Min/Max Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFMIN       : VPseudoBinaryV_VV_VF;
defm PseudoVFMAX       : VPseudoBinaryV_VV_VF;

//===----------------------------------------------------------------------===//
// 14.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFSGNJ      : VPseudoBinaryV_VV_VF;
defm PseudoVFSGNJN     : VPseudoBinaryV_VV_VF;
defm PseudoVFSGNJX     : VPseudoBinaryV_VV_VF;

//===----------------------------------------------------------------------===//
// 14.13. Vector Floating-Point Compare Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMFEQ       : VPseudoBinaryM_VV_VF;
defm PseudoVMFNE       : VPseudoBinaryM_VV_VF;
defm PseudoVMFLT       : VPseudoBinaryM_VV_VF;
defm PseudoVMFLE       : VPseudoBinaryM_VV_VF;
defm PseudoVMFGT       : VPseudoBinaryM_VF;
defm PseudoVMFGE       : VPseudoBinaryM_VF;

//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFCLASS     : VPseudoUnaryV_V;

//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMERGE     : VPseudoBinaryV_FM;

//===----------------------------------------------------------------------===//
// 14.16. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMV_V      : VPseudoUnaryV_F_NoDummyMask;

//===----------------------------------------------------------------------===//
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFCVT_XU_F : VPseudoConversionV_V;
defm PseudoVFCVT_X_F : VPseudoConversionV_V;
defm PseudoVFCVT_RTZ_XU_F : VPseudoConversionV_V;
defm PseudoVFCVT_RTZ_X_F : VPseudoConversionV_V;
defm PseudoVFCVT_F_XU : VPseudoConversionV_V;
defm PseudoVFCVT_F_X : VPseudoConversionV_V;

//===----------------------------------------------------------------------===//
// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFWCVT_XU_F : VPseudoConversionW_V;
defm PseudoVFWCVT_X_F : VPseudoConversionW_V;
defm PseudoVFWCVT_RTZ_XU_F : VPseudoConversionW_V;
defm PseudoVFWCVT_RTZ_X_F : VPseudoConversionW_V;
defm PseudoVFWCVT_F_XU : VPseudoConversionW_V;
defm PseudoVFWCVT_F_X : VPseudoConversionW_V;
defm PseudoVFWCVT_F_F : VPseudoConversionW_V;

//===----------------------------------------------------------------------===//
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFNCVT_XU_F : VPseudoConversionV_W;
defm PseudoVFNCVT_X_F : VPseudoConversionV_W;
defm PseudoVFNCVT_RTZ_XU_F : VPseudoConversionV_W;
defm PseudoVFNCVT_RTZ_X_F : VPseudoConversionV_W;
defm PseudoVFNCVT_F_XU : VPseudoConversionV_W;
defm PseudoVFNCVT_F_X : VPseudoConversionV_W;
defm PseudoVFNCVT_F_F : VPseudoConversionV_W;
defm PseudoVFNCVT_ROD_F_F : VPseudoConversionV_W;
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
//===----------------------------------------------------------------------===//
// 15.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm PseudoVREDSUM     : VPseudoReductionV_VS;
defm PseudoVREDAND     : VPseudoReductionV_VS;
defm PseudoVREDOR      : VPseudoReductionV_VS;
defm PseudoVREDXOR     : VPseudoReductionV_VS;
defm PseudoVREDMINU    : VPseudoReductionV_VS;
defm PseudoVREDMIN     : VPseudoReductionV_VS;
defm PseudoVREDMAXU    : VPseudoReductionV_VS;
defm PseudoVREDMAX     : VPseudoReductionV_VS;

//===----------------------------------------------------------------------===//
// 15.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm PseudoVWREDSUMU   : VPseudoReductionV_VS;
defm PseudoVWREDSUM    : VPseudoReductionV_VS;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
//===----------------------------------------------------------------------===//
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFREDOSUM   : VPseudoReductionV_VS;
defm PseudoVFREDSUM    : VPseudoReductionV_VS;
defm PseudoVFREDMIN    : VPseudoReductionV_VS;
defm PseudoVFREDMAX    : VPseudoReductionV_VS;

//===----------------------------------------------------------------------===//
// 15.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFWREDSUM   : VPseudoReductionV_VS;
defm PseudoVFWREDOSUM  : VPseudoReductionV_VS;

} // Predicates = [HasStdExtV, HasStdExtF]

//===----------------------------------------------------------------------===//
// 16. Vector Mask Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 16.1. Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//

defm PseudoVMAND: VPseudoBinaryM_MM;
defm PseudoVMNAND: VPseudoBinaryM_MM;
defm PseudoVMANDNOT: VPseudoBinaryM_MM;
defm PseudoVMXOR: VPseudoBinaryM_MM;
defm PseudoVMOR: VPseudoBinaryM_MM;
defm PseudoVMNOR: VPseudoBinaryM_MM;
defm PseudoVMORNOT: VPseudoBinaryM_MM;
defm PseudoVMXNOR: VPseudoBinaryM_MM;

// Pseudo instructions
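// vmclr.m and vmset.m are expansions of vmxor.mm and vmxnor.mm with all
// operands equal, which is what the template arguments below select.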
3818defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
3819defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
3820
3821//===----------------------------------------------------------------------===//
3822// 16.2. Vector mask population count vpopc
3823//===----------------------------------------------------------------------===//
3824
3825defm PseudoVPOPC: VPseudoUnaryS_M;
3826
3827//===----------------------------------------------------------------------===//
3828// 16.3. vfirst find-first-set mask bit
3829//===----------------------------------------------------------------------===//
3830
3831defm PseudoVFIRST: VPseudoUnaryS_M;
3832
3833//===----------------------------------------------------------------------===//
3834// 16.4. vmsbf.m set-before-first mask bit
3835//===----------------------------------------------------------------------===//
3836defm PseudoVMSBF: VPseudoUnaryM_M;
3837
3838//===----------------------------------------------------------------------===//
3839// 16.5. vmsif.m set-including-first mask bit
3840//===----------------------------------------------------------------------===//
3841defm PseudoVMSIF: VPseudoUnaryM_M;
3842
3843//===----------------------------------------------------------------------===//
3844// 16.6. vmsof.m set-only-first mask bit
3845//===----------------------------------------------------------------------===//
3846defm PseudoVMSOF: VPseudoUnaryM_M;
3847
3848//===----------------------------------------------------------------------===//
3849// 16.8.  Vector Iota Instruction
3850//===----------------------------------------------------------------------===//
3851defm PseudoVIOTA_M: VPseudoUnaryV_M;
3852
3853//===----------------------------------------------------------------------===//
3854// 16.9. Vector Element Index Instruction
3855//===----------------------------------------------------------------------===//
3856defm PseudoVID : VPseudoMaskNullaryV;
3857
3858//===----------------------------------------------------------------------===//
3859// 17. Vector Permutation Instructions
3860//===----------------------------------------------------------------------===//
3861
3862//===----------------------------------------------------------------------===//
3863// 17.1. Integer Scalar Move Instructions
3864//===----------------------------------------------------------------------===//
3865
3866let Predicates = [HasStdExtV] in {
3867let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
3868  foreach m = MxList.m in {
3869    let VLMul = m.value in {
3870      let HasSEWOp = 1, BaseInstr = VMV_X_S in
3871      def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd),
3872                                             (ins m.vrclass:$rs2, ixlenimm:$sew),
3873                                             []>, RISCVVPseudo;
3874      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
3875          Constraints = "$rd = $rs1" in
3876      def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
3877                                             (ins m.vrclass:$rs1, GPR:$rs2,
3878                                                  AVL:$vl, ixlenimm:$sew),
3879                                             []>, RISCVVPseudo;
3880    }
3881  }
3882}
3883} // Predicates = [HasStdExtV]
3884
3885//===----------------------------------------------------------------------===//
3886// 17.2. Floating-Point Scalar Move Instructions
3887//===----------------------------------------------------------------------===//
3888
3889let Predicates = [HasStdExtV, HasStdExtF] in {
3890let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
3891  foreach m = MxList.m in {
3892    foreach f = FPList.fpinfo in {
3893      let VLMul = m.value in {
3894        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
3895        def "PseudoVFMV_" # f.FX # "_S_" # m.MX :
3896                                          Pseudo<(outs f.fprclass:$rd),
3897                                                 (ins m.vrclass:$rs2,
3898                                                      ixlenimm:$sew),
3899                                                 []>, RISCVVPseudo;
3900        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
3901            Constraints = "$rd = $rs1" in
3902        def "PseudoVFMV_S_" # f.FX # "_" # m.MX :
3903                                          Pseudo<(outs m.vrclass:$rd),
3904                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
3905                                                      AVL:$vl, ixlenimm:$sew),
3906                                                 []>, RISCVVPseudo;
3907      }
3908    }
3909  }
3910}
3911} // Predicates = [HasStdExtV, HasStdExtF]
3912
3913//===----------------------------------------------------------------------===//
3914// 17.3. Vector Slide Instructions
3915//===----------------------------------------------------------------------===//
3916let Predicates = [HasStdExtV] in {
3917  defm PseudoVSLIDEUP    : VPseudoTernaryV_VX_VI<uimm5, "@earlyclobber $rd">;
3918  defm PseudoVSLIDEDOWN  : VPseudoTernaryV_VX_VI<uimm5>;
3919  defm PseudoVSLIDE1UP   : VPseudoBinaryV_VX<"@earlyclobber $rd">;
3920  defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX;
3921} // Predicates = [HasStdExtV]
3922
3923let Predicates = [HasStdExtV, HasStdExtF] in {
3924  defm PseudoVFSLIDE1UP  : VPseudoBinaryV_VF<"@earlyclobber $rd">;
3925  defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VF;
3926} // Predicates = [HasStdExtV, HasStdExtF]
3927
3928//===----------------------------------------------------------------------===//
3929// 17.4. Vector Register Gather Instructions
3930//===----------------------------------------------------------------------===//
defm PseudoVRGATHER     : VPseudoBinaryV_VV_VX_VI<uimm5, "@earlyclobber $rd">;
defm PseudoVRGATHEREI16 : VPseudoBinaryV_VV_EEW</* eew */ 16, "@earlyclobber $rd">;

//===----------------------------------------------------------------------===//
// 17.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
defm PseudoVCOMPRESS : VPseudoUnaryV_V_AnyMask;

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 8. Vector AMO Operations
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtZvamo] in {
  defm : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamoadd", "PseudoVAMOADD", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamoxor", "PseudoVAMOXOR", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamoand", "PseudoVAMOAND", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamoor", "PseudoVAMOOR", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamomin", "PseudoVAMOMIN", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamomax", "PseudoVAMOMAX", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamominu", "PseudoVAMOMINU", AllIntegerVectors>;
  defm : VPatAMOV_WD<"int_riscv_vamomaxu", "PseudoVAMOMAXU", AllIntegerVectors>;
} // Predicates = [HasStdExtZvamo]

let Predicates = [HasStdExtZvamo, HasStdExtF] in {
  defm : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllFloatVectors>;
} // Predicates = [HasStdExtZvamo, HasStdExtF]

//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {
//===----------------------------------------------------------------------===//
// 12.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.3. Vector Integer Extension
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
                     AllFractionableVF2IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
                     AllFractionableVF4IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
                     AllFractionableVF8IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
                     AllFractionableVF2IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
                     AllFractionableVF4IntVectors>;
defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
                     AllFractionableVF8IntVectors>;

//===----------------------------------------------------------------------===//
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;

defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;

//===----------------------------------------------------------------------===//
// 12.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
                            uimm5>;

//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.8. Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;

defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;

// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;

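// Likewise, match vmsge with 2 vector operands to vmsle with the operands
// swapped, since (a >= b) == (b <= a).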
defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;

// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
// avoids the user needing to know that there is no vmslt(u).vi instruction.
// Similar for vmsge(u).vx intrinsics using vmsgt(u).vi.
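// For example, vmslt.vx with scalar 5 becomes vmsle.vi with immediate 4
// (rewritten by DecImm), using the identity (x < c) == (x <= c - 1).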
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
                                       (vti.Scalar simm5_plus1:$rs2),
                                       VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               (DecImm simm5_plus1:$rs2),
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask VR:$merge),
                                            (vti.Vector vti.RegClass:$rs1),
                                            (vti.Scalar simm5_plus1:$rs2),
                                            (vti.Mask V0),
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                                (DecImm simm5_plus1:$rs2),
                                                                GPR:$vl,
                                                                vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar simm5_plus1:$rs2),
                                             (vti.Mask V0),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  // Special cases to avoid matching vmsltu.vi 0 (always false) to
  // vmsleu.vi -1 (always true). Instead match to vmsne.vv.
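  // (vmsne.vv with both operands set to $rs1 compares a register with itself,
  // which produces the required always-false mask.)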
  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar 0), VLOpFrag)),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs1,
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar 0),
                                             (vti.Mask V0),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
                                                     VR:$merge,
                                                     vti.RegClass:$rs1,
                                                     vti.RegClass:$rs1,
                                                     (vti.Mask V0),
                                                     GPR:$vl,
                                                     vti.Log2SEW)>;

  def : Pat<(vti.Mask (int_riscv_vmsge (vti.Vector vti.RegClass:$rs1),
                                       (vti.Scalar simm5_plus1:$rs2),
                                       VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               (DecImm simm5_plus1:$rs2),
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsge_mask (vti.Mask VR:$merge),
                                            (vti.Vector vti.RegClass:$rs1),
                                            (vti.Scalar simm5_plus1:$rs2),
                                            (vti.Mask V0),
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                                (DecImm simm5_plus1:$rs2),
                                                                GPR:$vl,
                                                                vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar simm5_plus1:$rs2),
                                             (vti.Mask V0),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  // Special cases to avoid matching vmsgeu.vi 0 (always true) to
  // vmsgtu.vi -1 (always false). Instead match to vmseq.vv.
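  // (vmseq.vv with both operands set to $rs1 compares a register with itself,
  // which produces the required always-true mask.)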
  def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar 0), VLOpFrag)),
            (!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs1,
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar 0),
                                             (vti.Mask V0),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX#"_MASK")
                                                     VR:$merge,
                                                     vti.RegClass:$rs1,
                                                     vti.RegClass:$rs1,
                                                     (vti.Mask V0),
                                                     GPR:$vl,
                                                     vti.Log2SEW)>;
}

//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.10. Vector Single-Width Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.11. Vector Integer Divide Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.13. Vector Single-Width Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.14. Vector Widening Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.15. Vector Integer Merge Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;

//===----------------------------------------------------------------------===//
// 12.16. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
foreach vti = AllVectors in {
  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
             $rs1, GPR:$vl, vti.Log2SEW)>;

  // vmv.v.x/vmv.v.i are handled in RISCVInstrInfoVVLPatterns.td
}

//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Averaging Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
                            uimm5>;

//===----------------------------------------------------------------------===//
// 13.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;

} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
//===----------------------------------------------------------------------===//
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>;
defm : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>;
defm : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
defm : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm : VPatTernaryW_VV_VX<"int_riscv_vfwmacc", "PseudoVFWMACC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmacc", "PseudoVFWNMACC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwmsac", "PseudoVFWMSAC", AllWidenableFloatVectors>;
defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.11. Vector Floating-Point Min/Max Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.13. Vector Floating-Point Compare Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;

//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
// We can use vmerge.vvm to support vector-vector vfmerge.
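// (There is no vector-vector form of vfmerge; because a merge only selects
// whole elements under the mask, the integer vmerge.vvm encoding also works
// for floating-point element types.)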
defm : VPatBinaryV_VM<"int_riscv_vfmerge", "PseudoVMERGE",
                      /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;
defm : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE",
                      /*CarryOut = */0, /*vtilist=*/AllFloatVectors>;

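// A +0.0 scalar can also use the integer immediate form vmerge.vim with
// immediate 0, because +0.0 has an all-zeros bit pattern in every supported
// floating-point format.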
foreach fvti = AllFloatVectors in {
  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2),
                                            (fvti.Scalar (fpimm0)),
                                            (fvti.Mask V0), VLOpFrag)),
            (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}

//===----------------------------------------------------------------------===//
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm : VPatConversionVI_VF<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
defm : VPatConversionVI_VF<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">;
defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">;

//===----------------------------------------------------------------------===//
// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;

//===----------------------------------------------------------------------===//
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm : VPatConversionVI_WF<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
defm : VPatConversionVI_WF<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
defm : VPatConversionVF_WI<"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
defm : VPatConversionVF_WI<"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
//===----------------------------------------------------------------------===//
// 15.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;

//===----------------------------------------------------------------------===//
// 15.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
//===----------------------------------------------------------------------===//
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredsum", "PseudoVFREDSUM", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;

//===----------------------------------------------------------------------===//
// 15.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm : VPatReductionW_VS<"int_riscv_vfwredsum", "PseudoVFWREDSUM", /*IsFloat=*/1>;
defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;

} // Predicates = [HasStdExtV, HasStdExtF]

//===----------------------------------------------------------------------===//
// 16. Vector Mask Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {
//===----------------------------------------------------------------------===//
// 16.1. Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
defm : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">;
defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
defm : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;

// Pseudo instructions: the spec defines vmclr.m and vmset.m as aliases of
// vmxor.mm vd, vd, vd and vmxnor.mm vd, vd, vd, respectively.
defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;

//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
//===----------------------------------------------------------------------===//
defm : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">;

//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;

//===----------------------------------------------------------------------===//
// 16.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;

//===----------------------------------------------------------------------===//
// 16.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;

//===----------------------------------------------------------------------===//
// 16.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;

//===----------------------------------------------------------------------===//
// 16.8. Vector Iota Instruction
//===----------------------------------------------------------------------===//
defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;

//===----------------------------------------------------------------------===//
// 16.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;

} // Predicates = [HasStdExtV]

//===----------------------------------------------------------------------===//
// 17. Vector Permutation Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 17.1. Integer Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
}
} // Predicates = [HasStdExtV]

//===----------------------------------------------------------------------===//
// 17.2. Floating-Point Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV, HasStdExtF] in {
foreach fvti = AllFloatVectors in {
  defvar instr = !cast<Instruction>("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
                                    fvti.LMul.MX);
  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
                         (instr $rs2, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                                fvti.LMul.MX)
             (fvti.Vector $rs1),
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             GPR:$vl, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]

//===----------------------------------------------------------------------===//
// 17.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]

//===----------------------------------------------------------------------===//
// 17.4. Vector Register Gather Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllIntegerVectors>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]

//===----------------------------------------------------------------------===//
// 17.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]

// Include the non-intrinsic ISel patterns
include "RISCVInstrInfoVSDPatterns.td"
include "RISCVInstrInfoVVLPatterns.td"