xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (revision a90b9d0159070121c221b966469c3e36d912bf82)
1//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file contains the required infrastructure to support code generation
10/// for the standard 'V' (Vector) extension, version 1.0.
11///
12/// This file is included from RISCVInstrInfoV.td
13///
14/// Overview of our vector instruction pseudos.  Many of the instructions
15/// have behavior which depends on the value of VTYPE.  Several core aspects of
16/// the compiler - e.g. register allocation - depend on fields in this
17/// configuration register.  The details of which fields matter differ by the
18/// specific instruction, but the common dimensions are:
19///
20/// LMUL/EMUL - Most instructions can write to differently sized register groups
21/// depending on LMUL.
22///
23/// Masked vs Unmasked - Many instructions which allow a mask disallow register
24/// overlap.  As a result, masked vs unmasked require different register
25/// allocation constraints.
26///
27/// Policy - For each of mask and tail policy, there are three options:
28/// * "Undisturbed" - As defined in the specification, required to preserve the
29/// exact bit pattern of inactive lanes.
30/// * "Agnostic" - As defined in the specification, required to either preserve
31/// the exact bit pattern of inactive lanes, or produce the bit pattern -1 for
32/// those lanes.  Note that each lane can make this choice independently.
33/// Instructions which produce masks (and only those instructions) also have the
34/// option of producing a result as-if VL had been VLMAX.
35/// * "Undefined" - The bit pattern of the inactive lanes is unspecified, and
36/// can be changed without impacting the semantics of the program.  Note that
37/// this concept does not exist in the specification, and requires source
38/// knowledge to be preserved.
39///
40/// SEW - Some instructions have semantics which depend on SEW.  This is
41/// relatively rare, and mostly impacts scheduling and cost estimation.
42///
43/// We have two techniques we use to represent the impact of these fields:
44/// * For fields which don't impact register classes, we largely use
45/// dummy operands on the pseudo instructions which convey information
46/// about the value of VTYPE.
47/// * For fields which do impact register classes (and a few bits of
48/// legacy - see policy discussion below), we define a family of pseudo
49/// instructions for each actual instruction.  Said differently, we encode
50/// each of the preceding fields which are relevant for a given instruction
51/// in the opcode space.
52///
53/// Currently, the policy is represented via the following instrinsic families:
54/// * _MASK - Can represent all three policy states for both tail and mask.  If
55///   passthrough is IMPLICIT_DEF (or NoReg), then represents "undefined".
56///   Otherwise, policy operand and tablegen flags drive the interpretation.
57///   (If policy operand is not present - there are a couple, though we're
58///   rapidly removing them - a non-undefined policy defaults to "tail
59///   agnostic", and "mask undisturbed".  Since this is the only variant with
60///   a mask, all other variants are "mask undefined".
61/// * Unsuffixed w/ both passthrough and policy operand. Can represent all
62///   three policy states.  If passthrough is IMPLICIT_DEF (or NoReg), then
63///   represents "undefined".  Otherwise, policy operand and tablegen flags
64///   drive the interpretation.
65/// * Unsuffixed w/o passthrough or policy operand -- Does not have a
66///   passthrough operand, and thus represents the "undefined" state.  Note
67///   that terminology in code frequently refers to these as "TA" which is
68///   confusing.  We're in the process of migrating away from this
69///   representation.
70/// * _TU w/o policy operand -- Has a passthrough operand, and always
71///   represents the tail undisturbed state.
72/// * _TU w/policy operand - Can represent all three policy states.  If
73///   passthrough is IMPLICIT_DEF (or NoReg), then represents "undefined".
74///   Otherwise, policy operand and tablegen flags drive the interpretation.
75///
76//===----------------------------------------------------------------------===//
77
78def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
79                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
80                                                SDTCisInt<1>]>>;
81def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
82                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;
83
84// Operand that is allowed to be a register other than X0, a 5 bit unsigned
85// immediate, or -1. -1 means VLMAX. This allows us to pick between VSETIVLI and
86// VSETVLI opcodes using the same pseudo instructions.
87def AVL : RegisterOperand<GPRNoX0> {
88  let OperandNamespace = "RISCVOp";
89  let OperandType = "OPERAND_AVL";
90}
91
92// X0 has special meaning for vsetvl/vsetvli.
93//  rd | rs1 |   AVL value | Effect on vl
94//--------------------------------------------------------------
95// !X0 |  X0 |       VLMAX | Set vl to VLMAX
96//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
97def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;
98
99def DecImm : SDNodeXForm<imm, [{
100  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
101                                   N->getValueType(0));
102}]>;
103
104defvar TAIL_AGNOSTIC = 1;
105defvar TU_MU = 0;
106defvar TA_MA = 3;
107
108//===----------------------------------------------------------------------===//
109// Utilities.
110//===----------------------------------------------------------------------===//
111
112class PseudoToVInst<string PseudoInst> {
113  defvar AffixSubsts = [["Pseudo", ""],
114                        ["_E64", ""],
115                        ["_E32", ""],
116                        ["_E16", ""],
117                        ["_E8", ""],
118                        ["FPR64", "F"],
119                        ["FPR32", "F"],
120                        ["FPR16", "F"],
121                        ["_TIED", ""],
122                        ["_MASK", ""],
123                        ["_B64", ""],
124                        ["_B32", ""],
125                        ["_B16", ""],
126                        ["_B8", ""],
127                        ["_B4", ""],
128                        ["_B2", ""],
129                        ["_B1", ""],
130                        ["_MF8", ""],
131                        ["_MF4", ""],
132                        ["_MF2", ""],
133                        ["_M1", ""],
134                        ["_M2", ""],
135                        ["_M4", ""],
136                        ["_M8", ""],
137                        ["_SE", ""],
138                        ["_RM", ""]
139                       ];
140  string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst,
141                        !subst(AffixSubst[0], AffixSubst[1], Acc));
142}
143
144// This class describes information associated to the LMUL.
145class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
146               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
147  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
148  VReg vrclass = regclass;
149  VReg wvrclass = wregclass;
150  VReg f8vrclass = f8regclass;
151  VReg f4vrclass = f4regclass;
152  VReg f2vrclass = f2regclass;
153  string MX = mx;
154  int octuple = oct;
155}
156
157// Associate LMUL with tablegen records of register classes.
158def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
159def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
160def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
161def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;
162
163def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
164def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
165def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;
166
167// Used to iterate over all possible LMULs.
168defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
169// For floating point which don't need MF8.
170defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
171
172// Used for widening and narrowing instructions as it doesn't contain M8.
173defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
174// Used for widening reductions. It can contain M8 because wider operands are
175// scalar operands.
176defvar MxListWRed = MxList;
177// For floating point which don't need MF8.
178defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];
179// For widening floating-point Reduction as it doesn't contain MF8. It can
180// contain M8 because wider operands are scalar operands.
181defvar MxListFWRed = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
182
183// Use for zext/sext.vf2
184defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
185
186// Use for zext/sext.vf4 and vector crypto instructions
187defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];
188
189// Use for zext/sext.vf8
190defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];
191
192class MxSet<int eew> {
193  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
194                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
195                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
196                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
197}
198
199class FPR_Info<int sew> {
200  RegisterClass fprclass = !cast<RegisterClass>("FPR" # sew);
201  string FX = "FPR" # sew;
202  int SEW = sew;
203  list<LMULInfo> MxList = MxSet<sew>.m;
204  list<LMULInfo> MxListFW = !if(!eq(sew, 64), [], !listremove(MxList, [V_M8]));
205}
206
207def SCALAR_F16 : FPR_Info<16>;
208def SCALAR_F32 : FPR_Info<32>;
209def SCALAR_F64 : FPR_Info<64>;
210
211// BF16 uses the same register class as F16.
212def SCALAR_BF16 : FPR_Info<16>;
213
214defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
215
216// Used for widening instructions. It excludes F64.
217defvar FPListW = [SCALAR_F16, SCALAR_F32];
218
219// Used for widening bf16 instructions.
220defvar BFPListW = [SCALAR_BF16];
221
222class NFSet<LMULInfo m> {
223  defvar lmul = !shl(1, m.value);
224  list<int> L = NFList<lmul>.L;
225}
226
227class octuple_to_str<int octuple> {
228  string ret = !cond(!eq(octuple, 1): "MF8",
229                     !eq(octuple, 2): "MF4",
230                     !eq(octuple, 4): "MF2",
231                     !eq(octuple, 8): "M1",
232                     !eq(octuple, 16): "M2",
233                     !eq(octuple, 32): "M4",
234                     !eq(octuple, 64): "M8");
235}
236
237def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
238
239// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
240// We can't use X0 register becuase the AVL operands use GPRNoX0.
241// This must be kept in sync with RISCV::VLMaxSentinel.
242def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
243
244def SelectFPImm : ComplexPattern<fAny, 1, "selectFPImm", [], [], 1>;
245
246// List of EEW.
247defvar EEWList = [8, 16, 32, 64];
248
249class SegRegClass<LMULInfo m, int nf> {
250  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
251                                           !eq(m.value, V_MF4.value): V_M1.MX,
252                                           !eq(m.value, V_MF2.value): V_M1.MX,
253                                           true: m.MX));
254}
255
256//===----------------------------------------------------------------------===//
257// Vector register and vector group type information.
258//===----------------------------------------------------------------------===//
259
260class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, LMULInfo M,
261                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR> {
262  ValueType Vector = Vec;
263  ValueType Mask = Mas;
264  int SEW = Sew;
265  int Log2SEW = !logtwo(Sew);
266  VReg RegClass = M.vrclass;
267  LMULInfo LMul = M;
268  ValueType Scalar = Scal;
269  RegisterClass ScalarRegClass = ScalarReg;
270  // The pattern fragment which produces the AVL operand, representing the
271  // "natural" vector length for this type. For scalable vectors this is VLMax.
272  OutPatFrag AVL = VLMax;
273
274  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
275                              !eq(Scal, f16) : "FPR16",
276                              !eq(Scal, bf16) : "FPR16",
277                              !eq(Scal, f32) : "FPR32",
278                              !eq(Scal, f64) : "FPR64");
279}
280
281class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
282                     LMULInfo M, ValueType Scal = XLenVT,
283                     RegisterClass ScalarReg = GPR>
284    : VTypeInfo<Vec, Mas, Sew, M, Scal, ScalarReg> {
285  ValueType VectorM1 = VecM1;
286}
287
288defset list<VTypeInfo> AllVectors = {
289  defset list<VTypeInfo> AllIntegerVectors = {
290    defset list<VTypeInfo> NoGroupIntegerVectors = {
291      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
292        def VI8MF8:  VTypeInfo<vint8mf8_t,  vbool64_t, 8,  V_MF8>;
293        def VI8MF4:  VTypeInfo<vint8mf4_t,  vbool32_t, 8,  V_MF4>;
294        def VI8MF2:  VTypeInfo<vint8mf2_t,  vbool16_t, 8,  V_MF2>;
295        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, V_MF4>;
296        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, V_MF2>;
297        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, V_MF2>;
298      }
299      def VI8M1:  VTypeInfo<vint8m1_t,  vbool8_t,   8, V_M1>;
300      def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, V_M1>;
301      def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, V_M1>;
302      def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, V_M1>;
303    }
304    defset list<GroupVTypeInfo> GroupIntegerVectors = {
305      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, V_M2>;
306      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, V_M4>;
307      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, V_M8>;
308
309      def VI16M2: GroupVTypeInfo<vint16m2_t, vint16m1_t, vbool8_t, 16, V_M2>;
310      def VI16M4: GroupVTypeInfo<vint16m4_t, vint16m1_t, vbool4_t, 16, V_M4>;
311      def VI16M8: GroupVTypeInfo<vint16m8_t, vint16m1_t, vbool2_t, 16, V_M8>;
312
313      def VI32M2: GroupVTypeInfo<vint32m2_t, vint32m1_t, vbool16_t, 32, V_M2>;
314      def VI32M4: GroupVTypeInfo<vint32m4_t, vint32m1_t, vbool8_t,  32, V_M4>;
315      def VI32M8: GroupVTypeInfo<vint32m8_t, vint32m1_t, vbool4_t,  32, V_M8>;
316
317      def VI64M2: GroupVTypeInfo<vint64m2_t, vint64m1_t, vbool32_t, 64, V_M2>;
318      def VI64M4: GroupVTypeInfo<vint64m4_t, vint64m1_t, vbool16_t, 64, V_M4>;
319      def VI64M8: GroupVTypeInfo<vint64m8_t, vint64m1_t, vbool8_t,  64, V_M8>;
320    }
321  }
322
323  defset list<VTypeInfo> AllFloatVectors = {
324    defset list<VTypeInfo> NoGroupFloatVectors = {
325      defset list<VTypeInfo> FractionalGroupFloatVectors = {
326        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, V_MF4, f16, FPR16>;
327        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, V_MF2, f16, FPR16>;
328        def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, V_MF2, f32, FPR32>;
329      }
330      def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, V_M1, f16, FPR16>;
331      def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, V_M1, f32, FPR32>;
332      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, V_M1, f64, FPR64>;
333    }
334
335    defset list<GroupVTypeInfo> GroupFloatVectors = {
336      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
337                                 V_M2, f16, FPR16>;
338      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
339                                 V_M4, f16, FPR16>;
340      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
341                                 V_M8, f16, FPR16>;
342
343      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
344                                 V_M2, f32, FPR32>;
345      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
346                                 V_M4, f32, FPR32>;
347      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
348                                 V_M8, f32, FPR32>;
349
350      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
351                                 V_M2, f64, FPR64>;
352      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
353                                 V_M4, f64, FPR64>;
354      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
355                                 V_M8, f64, FPR64>;
356    }
357  }
358}
359
360defset list<VTypeInfo> AllBFloatVectors = {
361  defset list<VTypeInfo> NoGroupBFloatVectors = {
362    defset list<VTypeInfo> FractionalGroupBFloatVectors = {
363      def VBF16MF4: VTypeInfo<vbfloat16mf4_t, vbool64_t, 16, V_MF4, bf16, FPR16>;
364      def VBF16MF2: VTypeInfo<vbfloat16mf2_t, vbool32_t, 16, V_MF2, bf16, FPR16>;
365    }
366    def VBF16M1:  VTypeInfo<vbfloat16m1_t, vbool16_t, 16, V_M1, bf16, FPR16>;
367  }
368
369  defset list<GroupVTypeInfo> GroupBFloatVectors = {
370    def VBF16M2: GroupVTypeInfo<vbfloat16m2_t, vbfloat16m1_t, vbool8_t, 16,
371                                V_M2, bf16, FPR16>;
372    def VBF16M4: GroupVTypeInfo<vbfloat16m4_t, vbfloat16m1_t, vbool4_t, 16,
373                                V_M4, bf16, FPR16>;
374    def VBF16M8: GroupVTypeInfo<vbfloat16m8_t, vbfloat16m1_t, vbool2_t, 16,
375                                V_M8, bf16, FPR16>;
376  }
377}
378
379// This functor is used to obtain the int vector type that has the same SEW and
380// multiplier as the input parameter type
381class GetIntVTypeInfo<VTypeInfo vti> {
382  // Equivalent integer vector type. Eg.
383  //   VI8M1 → VI8M1 (identity)
384  //   VF64M4 → VI64M4
385  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
386}
387
388class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
389  ValueType Mask = Mas;
390  // {SEW, VLMul} values set a valid VType to deal with this mask type.
391  // we assume SEW=1 and set corresponding LMUL. vsetvli insertion will
392  // look for SEW=1 to optimize based on surrounding instructions.
393  int SEW = 1;
394  int Log2SEW = 0;
395  LMULInfo LMul = M;
396  string BX = Bx; // Appendix of mask operations.
397  // The pattern fragment which produces the AVL operand, representing the
398  // "natural" vector length for this mask type. For scalable masks this is
399  // VLMax.
400  OutPatFrag AVL = VLMax;
401}
402
403defset list<MTypeInfo> AllMasks = {
404  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
405  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
406  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
407  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
408  def : MTypeInfo<vbool8_t, V_M1, "B8">;
409  def : MTypeInfo<vbool4_t, V_M2, "B16">;
410  def : MTypeInfo<vbool2_t, V_M4, "B32">;
411  def : MTypeInfo<vbool1_t, V_M8, "B64">;
412}
413
414class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti> {
415  VTypeInfo Vti = vti;
416  VTypeInfo Wti = wti;
417}
418
419class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti> {
420  VTypeInfo Vti = vti;
421  VTypeInfo Fti = fti;
422}
423
424defset list<VTypeInfoToWide> AllWidenableIntVectors = {
425  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
426  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
427  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
428  def : VTypeInfoToWide<VI8M1,   VI16M2>;
429  def : VTypeInfoToWide<VI8M2,   VI16M4>;
430  def : VTypeInfoToWide<VI8M4,   VI16M8>;
431
432  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
433  def : VTypeInfoToWide<VI16MF2, VI32M1>;
434  def : VTypeInfoToWide<VI16M1,  VI32M2>;
435  def : VTypeInfoToWide<VI16M2,  VI32M4>;
436  def : VTypeInfoToWide<VI16M4,  VI32M8>;
437
438  def : VTypeInfoToWide<VI32MF2, VI64M1>;
439  def : VTypeInfoToWide<VI32M1,  VI64M2>;
440  def : VTypeInfoToWide<VI32M2,  VI64M4>;
441  def : VTypeInfoToWide<VI32M4,  VI64M8>;
442}
443
444defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
445  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
446  def : VTypeInfoToWide<VF16MF2, VF32M1>;
447  def : VTypeInfoToWide<VF16M1, VF32M2>;
448  def : VTypeInfoToWide<VF16M2, VF32M4>;
449  def : VTypeInfoToWide<VF16M4, VF32M8>;
450
451  def : VTypeInfoToWide<VF32MF2, VF64M1>;
452  def : VTypeInfoToWide<VF32M1, VF64M2>;
453  def : VTypeInfoToWide<VF32M2, VF64M4>;
454  def : VTypeInfoToWide<VF32M4, VF64M8>;
455}
456
457defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
458  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
459  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
460  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
461  def : VTypeInfoToFraction<VI16M2, VI8M1>;
462  def : VTypeInfoToFraction<VI16M4, VI8M2>;
463  def : VTypeInfoToFraction<VI16M8, VI8M4>;
464  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
465  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
466  def : VTypeInfoToFraction<VI32M2, VI16M1>;
467  def : VTypeInfoToFraction<VI32M4, VI16M2>;
468  def : VTypeInfoToFraction<VI32M8, VI16M4>;
469  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
470  def : VTypeInfoToFraction<VI64M2, VI32M1>;
471  def : VTypeInfoToFraction<VI64M4, VI32M2>;
472  def : VTypeInfoToFraction<VI64M8, VI32M4>;
473}
474
475defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
476  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
477  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
478  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
479  def : VTypeInfoToFraction<VI32M4, VI8M1>;
480  def : VTypeInfoToFraction<VI32M8, VI8M2>;
481  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
482  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
483  def : VTypeInfoToFraction<VI64M4, VI16M1>;
484  def : VTypeInfoToFraction<VI64M8, VI16M2>;
485}
486
487defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
488  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
489  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
490  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
491  def : VTypeInfoToFraction<VI64M8, VI8M1>;
492}
493
494defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
495  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
496  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
497  def : VTypeInfoToWide<VI8MF2, VF16M1>;
498  def : VTypeInfoToWide<VI8M1, VF16M2>;
499  def : VTypeInfoToWide<VI8M2, VF16M4>;
500  def : VTypeInfoToWide<VI8M4, VF16M8>;
501
502  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
503  def : VTypeInfoToWide<VI16MF2, VF32M1>;
504  def : VTypeInfoToWide<VI16M1, VF32M2>;
505  def : VTypeInfoToWide<VI16M2, VF32M4>;
506  def : VTypeInfoToWide<VI16M4, VF32M8>;
507
508  def : VTypeInfoToWide<VI32MF2, VF64M1>;
509  def : VTypeInfoToWide<VI32M1, VF64M2>;
510  def : VTypeInfoToWide<VI32M2, VF64M4>;
511  def : VTypeInfoToWide<VI32M4, VF64M8>;
512}
513
514defset list<VTypeInfoToWide> AllWidenableBFloatToFloatVectors = {
515  def : VTypeInfoToWide<VBF16MF4, VF32MF2>;
516  def : VTypeInfoToWide<VBF16MF2, VF32M1>;
517  def : VTypeInfoToWide<VBF16M1, VF32M2>;
518  def : VTypeInfoToWide<VBF16M2, VF32M4>;
519  def : VTypeInfoToWide<VBF16M4, VF32M8>;
520}
521
522// This class holds the record of the RISCVVPseudoTable below.
523// This represents the information we need in codegen for each pseudo.
524// The definition should be consistent with `struct PseudoInfo` in
525// RISCVInstrInfo.h.
526class RISCVVPseudo {
527  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
528  Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
529  // SEW = 0 is used to denote that the Pseudo is not SEW specific (or unknown).
530  bits<8> SEW = 0;
531  bit NeedBeInPseudoTable = 1;
532}
533
534// The actual table.
535def RISCVVPseudosTable : GenericTable {
536  let FilterClass = "RISCVVPseudo";
537  let FilterClassField = "NeedBeInPseudoTable";
538  let CppTypeName = "PseudoInfo";
539  let Fields = [ "Pseudo", "BaseInstr" ];
540  let PrimaryKey = [ "Pseudo" ];
541  let PrimaryKeyName = "getPseudoInfo";
542  let PrimaryKeyEarlyOut = true;
543}
544
545def RISCVVInversePseudosTable : GenericTable {
546  let FilterClass = "RISCVVPseudo";
547  let CppTypeName = "PseudoInfo";
548  let Fields = [ "Pseudo", "BaseInstr", "VLMul", "SEW"];
549  let PrimaryKey = [ "BaseInstr", "VLMul", "SEW"];
550  let PrimaryKeyName = "getBaseInfo";
551  let PrimaryKeyEarlyOut = true;
552}
553
554def RISCVVIntrinsicsTable : GenericTable {
555  let FilterClass = "RISCVVIntrinsic";
556  let CppTypeName = "RISCVVIntrinsicInfo";
557  let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"];
558  let PrimaryKey = ["IntrinsicID"];
559  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
560}
561
562// Describes the relation of a masked pseudo to the unmasked variants.
563//    Note that all masked variants (in this table) have exactly one
564//    unmasked variant.  For all but compares, both the masked and
565//    unmasked variant have a passthru and policy operand.  For compares,
566//    neither has a policy op, and only the masked version has a passthru.
567class RISCVMaskedPseudo<bits<4> MaskIdx, bit MaskAffectsRes=false> {
568  Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
569  Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
570  bits<4> MaskOpIdx = MaskIdx;
571  bit MaskAffectsResult = MaskAffectsRes;
572}
573
574def RISCVMaskedPseudosTable : GenericTable {
575  let FilterClass = "RISCVMaskedPseudo";
576  let CppTypeName = "RISCVMaskedPseudoInfo";
577  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "MaskOpIdx", "MaskAffectsResult"];
578  let PrimaryKey = ["MaskedPseudo"];
579  let PrimaryKeyName = "getMaskedPseudoInfo";
580}
581
582class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
583  bits<1> Masked = M;
584  bits<1> Strided = Str;
585  bits<1> FF = F;
586  bits<3> Log2SEW = S;
587  bits<3> LMUL = L;
588  Pseudo Pseudo = !cast<Pseudo>(NAME);
589}
590
591def lookupMaskedIntrinsicByUnmasked : SearchIndex {
592  let Table = RISCVMaskedPseudosTable;
593  let Key = ["UnmaskedPseudo"];
594}
595
596def RISCVVLETable : GenericTable {
597  let FilterClass = "RISCVVLE";
598  let CppTypeName = "VLEPseudo";
599  let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
600  let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
601  let PrimaryKeyName = "getVLEPseudo";
602}
603
604class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
605  bits<1> Masked = M;
606  bits<1> Strided = Str;
607  bits<3> Log2SEW = S;
608  bits<3> LMUL = L;
609  Pseudo Pseudo = !cast<Pseudo>(NAME);
610}
611
612def RISCVVSETable : GenericTable {
613  let FilterClass = "RISCVVSE";
614  let CppTypeName = "VSEPseudo";
615  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
616  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
617  let PrimaryKeyName = "getVSEPseudo";
618}
619
620class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
621  bits<1> Masked = M;
622  bits<1> Ordered = O;
623  bits<3> Log2SEW = S;
624  bits<3> LMUL = L;
625  bits<3> IndexLMUL = IL;
626  Pseudo Pseudo = !cast<Pseudo>(NAME);
627}
628
629class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
630  RISCVVLX_VSX<M, O, S, L, IL>;
631class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
632  RISCVVLX_VSX<M, O, S, L, IL>;
633
634class RISCVVLX_VSXTable : GenericTable {
635  let CppTypeName = "VLX_VSXPseudo";
636  let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
637  let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
638}
639
640def RISCVVLXTable : RISCVVLX_VSXTable {
641  let FilterClass = "RISCVVLX";
642  let PrimaryKeyName = "getVLXPseudo";
643}
644
645def RISCVVSXTable : RISCVVLX_VSXTable {
646  let FilterClass = "RISCVVSX";
647  let PrimaryKeyName = "getVSXPseudo";
648}
649
650class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
651  bits<4> NF = N;
652  bits<1> Masked = M;
653  bits<1> Strided = Str;
654  bits<1> FF = F;
655  bits<3> Log2SEW = S;
656  bits<3> LMUL = L;
657  Pseudo Pseudo = !cast<Pseudo>(NAME);
658}
659
660def RISCVVLSEGTable : GenericTable {
661  let FilterClass = "RISCVVLSEG";
662  let CppTypeName = "VLSEGPseudo";
663  let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
664  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
665  let PrimaryKeyName = "getVLSEGPseudo";
666}
667
668class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
669  bits<4> NF = N;
670  bits<1> Masked = M;
671  bits<1> Ordered = O;
672  bits<3> Log2SEW = S;
673  bits<3> LMUL = L;
674  bits<3> IndexLMUL = IL;
675  Pseudo Pseudo = !cast<Pseudo>(NAME);
676}
677
678def RISCVVLXSEGTable : GenericTable {
679  let FilterClass = "RISCVVLXSEG";
680  let CppTypeName = "VLXSEGPseudo";
681  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
682  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
683  let PrimaryKeyName = "getVLXSEGPseudo";
684}
685
686class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
687  bits<4> NF = N;
688  bits<1> Masked = M;
689  bits<1> Strided = Str;
690  bits<3> Log2SEW = S;
691  bits<3> LMUL = L;
692  Pseudo Pseudo = !cast<Pseudo>(NAME);
693}
694
695def RISCVVSSEGTable : GenericTable {
696  let FilterClass = "RISCVVSSEG";
697  let CppTypeName = "VSSEGPseudo";
698  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
699  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
700  let PrimaryKeyName = "getVSSEGPseudo";
701}
702
703class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
704  bits<4> NF = N;
705  bits<1> Masked = M;
706  bits<1> Ordered = O;
707  bits<3> Log2SEW = S;
708  bits<3> LMUL = L;
709  bits<3> IndexLMUL = IL;
710  Pseudo Pseudo = !cast<Pseudo>(NAME);
711}
712
713def RISCVVSXSEGTable : GenericTable {
714  let FilterClass = "RISCVVSXSEG";
715  let CppTypeName = "VSXSEGPseudo";
716  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
717  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
718  let PrimaryKeyName = "getVSXSEGPseudo";
719}
720
721//===----------------------------------------------------------------------===//
722// Helpers to define the different pseudo instructions.
723//===----------------------------------------------------------------------===//
724
725// The destination vector register group for a masked vector instruction cannot
726// overlap the source mask register (v0), unless the destination vector register
727// is being written with a mask value (e.g., comparisons) or the scalar result
728// of a reduction.
729class GetVRegNoV0<VReg VRegClass> {
730  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
731                 !eq(VRegClass, VRM2) : VRM2NoV0,
732                 !eq(VRegClass, VRM4) : VRM4NoV0,
733                 !eq(VRegClass, VRM8) : VRM8NoV0,
734                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
735                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
736                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
737                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
738                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
739                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
740                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
741                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
742                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
743                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
744                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
745                 true : VRegClass);
746}
747
748class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins, int sew = 0> :
749      Pseudo<outs, ins, []>, RISCVVPseudo {
750  let BaseInstr = instr;
751  let VLMul = m.value;
752  let SEW = sew;
753}
754
755class GetVTypePredicates<VTypeInfo vti> {
756  list<Predicate> Predicates = !cond(!eq(vti.Scalar, f16) : [HasVInstructionsF16],
757                                     !eq(vti.Scalar, bf16) : [HasVInstructionsBF16],
758                                     !eq(vti.Scalar, f32) : [HasVInstructionsAnyF],
759                                     !eq(vti.Scalar, f64) : [HasVInstructionsF64],
760                                     !eq(vti.SEW, 64) : [HasVInstructionsI64],
761                                     true : [HasVInstructions]);
762}
763
764class VPseudoUSLoadNoMask<VReg RetClass,
765                          int EEW> :
766      Pseudo<(outs RetClass:$rd),
767             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew,
768                  ixlenimm:$policy), []>,
769      RISCVVPseudo,
770      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
771  let mayLoad = 1;
772  let mayStore = 0;
773  let hasSideEffects = 0;
774  let HasVLOp = 1;
775  let HasSEWOp = 1;
776  let HasVecPolicyOp = 1;
777  let Constraints = "$rd = $dest";
778}
779
780class VPseudoUSLoadMask<VReg RetClass,
781                        int EEW> :
782      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
783             (ins GetVRegNoV0<RetClass>.R:$merge,
784                  GPRMem:$rs1,
785                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
786      RISCVVPseudo,
787      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
788  let mayLoad = 1;
789  let mayStore = 0;
790  let hasSideEffects = 0;
791  let Constraints = "$rd = $merge";
792  let HasVLOp = 1;
793  let HasSEWOp = 1;
794  let HasVecPolicyOp = 1;
795  let UsesMaskPolicy = 1;
796}
797
798class VPseudoUSLoadFFNoMask<VReg RetClass,
799                            int EEW> :
800      Pseudo<(outs RetClass:$rd, GPR:$vl),
801             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
802                  ixlenimm:$sew, ixlenimm:$policy), []>,
803      RISCVVPseudo,
804      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
805  let mayLoad = 1;
806  let mayStore = 0;
807  let hasSideEffects = 0;
808  let HasVLOp = 1;
809  let HasSEWOp = 1;
810  let HasVecPolicyOp = 1;
811  let Constraints = "$rd = $dest";
812}
813
814class VPseudoUSLoadFFMask<VReg RetClass,
815                          int EEW> :
816      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
817             (ins GetVRegNoV0<RetClass>.R:$merge,
818                  GPRMem:$rs1,
819                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
820      RISCVVPseudo,
821      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
822  let mayLoad = 1;
823  let mayStore = 0;
824  let hasSideEffects = 0;
825  let Constraints = "$rd = $merge";
826  let HasVLOp = 1;
827  let HasSEWOp = 1;
828  let HasVecPolicyOp = 1;
829  let UsesMaskPolicy = 1;
830}
831
832class VPseudoSLoadNoMask<VReg RetClass,
833                         int EEW> :
834      Pseudo<(outs RetClass:$rd),
835             (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl,
836                  ixlenimm:$sew, ixlenimm:$policy), []>,
837      RISCVVPseudo,
838      RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
839  let mayLoad = 1;
840  let mayStore = 0;
841  let hasSideEffects = 0;
842  let HasVLOp = 1;
843  let HasSEWOp = 1;
844  let HasVecPolicyOp = 1;
845  let Constraints = "$rd = $dest";
846}
847
848class VPseudoSLoadMask<VReg RetClass,
849                       int EEW> :
850      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
851             (ins GetVRegNoV0<RetClass>.R:$merge,
852                  GPRMem:$rs1, GPR:$rs2,
853                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
854      RISCVVPseudo,
855      RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
856  let mayLoad = 1;
857  let mayStore = 0;
858  let hasSideEffects = 0;
859  let Constraints = "$rd = $merge";
860  let HasVLOp = 1;
861  let HasSEWOp = 1;
862  let HasVecPolicyOp = 1;
863  let UsesMaskPolicy = 1;
864}
865
866class VPseudoILoadNoMask<VReg RetClass,
867                         VReg IdxClass,
868                         int EEW,
869                         bits<3> LMUL,
870                         bit Ordered,
871                         bit EarlyClobber,
872                         int TargetConstraintType = 1> :
873      Pseudo<(outs RetClass:$rd),
874             (ins RetClass:$dest, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
875                  ixlenimm:$sew, ixlenimm:$policy), []>,
876      RISCVVPseudo,
877      RISCVVLX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
878  let mayLoad = 1;
879  let mayStore = 0;
880  let hasSideEffects = 0;
881  let HasVLOp = 1;
882  let HasSEWOp = 1;
883  let HasVecPolicyOp = 1;
884  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
885  let TargetOverlapConstraintType = TargetConstraintType;
886}
887
888class VPseudoILoadMask<VReg RetClass,
889                       VReg IdxClass,
890                       int EEW,
891                       bits<3> LMUL,
892                       bit Ordered,
893                       bit EarlyClobber,
894                       int TargetConstraintType = 1> :
895      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
896             (ins GetVRegNoV0<RetClass>.R:$merge,
897                  GPRMem:$rs1, IdxClass:$rs2,
898                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
899      RISCVVPseudo,
900      RISCVVLX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
901  let mayLoad = 1;
902  let mayStore = 0;
903  let hasSideEffects = 0;
904  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
905  let TargetOverlapConstraintType = TargetConstraintType;
906  let HasVLOp = 1;
907  let HasSEWOp = 1;
908  let HasVecPolicyOp = 1;
909  let UsesMaskPolicy = 1;
910}
911
912class VPseudoUSStoreNoMask<VReg StClass,
913                           int EEW> :
914      Pseudo<(outs),
915             (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>,
916      RISCVVPseudo,
917      RISCVVSE</*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
918  let mayLoad = 0;
919  let mayStore = 1;
920  let hasSideEffects = 0;
921  let HasVLOp = 1;
922  let HasSEWOp = 1;
923}
924
925class VPseudoUSStoreMask<VReg StClass,
926                         int EEW> :
927      Pseudo<(outs),
928             (ins StClass:$rd, GPRMem:$rs1,
929                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
930      RISCVVPseudo,
931      RISCVVSE</*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
932  let mayLoad = 0;
933  let mayStore = 1;
934  let hasSideEffects = 0;
935  let HasVLOp = 1;
936  let HasSEWOp = 1;
937}
938
939class VPseudoSStoreNoMask<VReg StClass,
940                          int EEW> :
941      Pseudo<(outs),
942             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
943                  AVL:$vl, ixlenimm:$sew), []>,
944      RISCVVPseudo,
945      RISCVVSE</*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
946  let mayLoad = 0;
947  let mayStore = 1;
948  let hasSideEffects = 0;
949  let HasVLOp = 1;
950  let HasSEWOp = 1;
951}
952
953class VPseudoSStoreMask<VReg StClass,
954                        int EEW> :
955      Pseudo<(outs),
956             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
957                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
958      RISCVVPseudo,
959      RISCVVSE</*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
960  let mayLoad = 0;
961  let mayStore = 1;
962  let hasSideEffects = 0;
963  let HasVLOp = 1;
964  let HasSEWOp = 1;
965}
966
967class VPseudoNullaryNoMask<VReg RegClass> :
968      Pseudo<(outs RegClass:$rd),
969             (ins RegClass:$merge,
970                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
971      RISCVVPseudo {
972  let mayLoad = 0;
973  let mayStore = 0;
974  let hasSideEffects = 0;
975  let Constraints = "$rd = $merge";
976  let HasVLOp = 1;
977  let HasSEWOp = 1;
978  let HasVecPolicyOp = 1;
979}
980
981class VPseudoNullaryMask<VReg RegClass> :
982      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
983             (ins GetVRegNoV0<RegClass>.R:$merge,
984                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
985      RISCVVPseudo {
986  let mayLoad = 0;
987  let mayStore = 0;
988  let hasSideEffects = 0;
989  let Constraints ="$rd = $merge";
990  let HasVLOp = 1;
991  let HasSEWOp = 1;
992  let UsesMaskPolicy = 1;
993  let HasVecPolicyOp = 1;
994}
995
996// Nullary for pseudo instructions. They are expanded in
997// RISCVExpandPseudoInsts pass.
998class VPseudoNullaryPseudoM<string BaseInst> :
999      Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
1000      RISCVVPseudo {
1001  let mayLoad = 0;
1002  let mayStore = 0;
1003  let hasSideEffects = 0;
1004  let HasVLOp = 1;
1005  let HasSEWOp = 1;
1006  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
1007  // Just fill a corresponding real v-inst to pass tablegen check.
1008  let BaseInstr = !cast<Instruction>(BaseInst);
1009  // We exclude them from RISCVVPseudoTable.
1010  let NeedBeInPseudoTable = 0;
1011}
1012
1013class VPseudoUnaryNoMask<DAGOperand RetClass,
1014                         DAGOperand OpClass,
1015                         string Constraint = "",
1016                         int TargetConstraintType = 1> :
1017      Pseudo<(outs RetClass:$rd),
1018             (ins RetClass:$merge, OpClass:$rs2,
1019                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1020      RISCVVPseudo {
1021  let mayLoad = 0;
1022  let mayStore = 0;
1023  let hasSideEffects = 0;
1024  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1025  let TargetOverlapConstraintType = TargetConstraintType;
1026  let HasVLOp = 1;
1027  let HasSEWOp = 1;
1028  let HasVecPolicyOp = 1;
1029}
1030
1031class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
1032                                     DAGOperand OpClass,
1033                                     string Constraint = "",
1034                                     int TargetConstraintType = 1> :
1035      Pseudo<(outs RetClass:$rd),
1036             (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
1037                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1038      RISCVVPseudo {
1039  let mayLoad = 0;
1040  let mayStore = 0;
1041  let hasSideEffects = 0;
1042  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1043  let TargetOverlapConstraintType = TargetConstraintType;
1044  let HasVLOp = 1;
1045  let HasSEWOp = 1;
1046  let HasVecPolicyOp = 1;
1047  let HasRoundModeOp = 1;
1048  let UsesVXRM = 0;
1049}
1050
1051class VPseudoUnaryMask<VReg RetClass,
1052                       VReg OpClass,
1053                       string Constraint = "",
1054                       int TargetConstraintType = 1> :
1055      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1056             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
1057                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1058      RISCVVPseudo {
1059  let mayLoad = 0;
1060  let mayStore = 0;
1061  let hasSideEffects = 0;
1062  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1063  let TargetOverlapConstraintType = TargetConstraintType;
1064  let HasVLOp = 1;
1065  let HasSEWOp = 1;
1066  let HasVecPolicyOp = 1;
1067  let UsesMaskPolicy = 1;
1068}
1069
1070class VPseudoUnaryMaskRoundingMode<VReg RetClass,
1071                                   VReg OpClass,
1072                                   string Constraint = "",
1073                                   int TargetConstraintType = 1> :
1074      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1075             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
1076                  VMaskOp:$vm, ixlenimm:$rm,
1077                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1078      RISCVVPseudo {
1079  let mayLoad = 0;
1080  let mayStore = 0;
1081  let hasSideEffects = 0;
1082  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1083  let TargetOverlapConstraintType = TargetConstraintType;
1084  let HasVLOp = 1;
1085  let HasSEWOp = 1;
1086  let HasVecPolicyOp = 1;
1087  let UsesMaskPolicy = 1;
1088  let HasRoundModeOp = 1;
1089  let UsesVXRM = 0;
1090}
1091
1092class VPseudoUnaryMask_NoExcept<VReg RetClass,
1093                                VReg OpClass,
1094                                string Constraint = ""> :
1095      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1096             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
1097                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
1098  let mayLoad = 0;
1099  let mayStore = 0;
1100  let hasSideEffects = 0;
1101  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1102  let HasVLOp = 1;
1103  let HasSEWOp = 1;
1104  let HasVecPolicyOp = 1;
1105  let UsesMaskPolicy = 1;
1106  let usesCustomInserter = 1;
1107}
1108
1109class VPseudoUnaryNoMask_FRM<VReg RetClass,
1110                             VReg OpClass,
1111                             string Constraint = "",
1112                             int TargetConstraintType = 1> :
1113      Pseudo<(outs RetClass:$rd),
1114             (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm,
1115                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1116      RISCVVPseudo {
1117  let mayLoad = 0;
1118  let mayStore = 0;
1119  let hasSideEffects = 0;
1120  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1121  let TargetOverlapConstraintType = TargetConstraintType;
1122  let HasVLOp = 1;
1123  let HasSEWOp = 1;
1124  let HasVecPolicyOp = 1;
1125  let HasRoundModeOp = 1;
1126}
1127
1128class VPseudoUnaryMask_FRM<VReg RetClass,
1129                           VReg OpClass,
1130                           string Constraint = "",
1131                           int TargetConstraintType = 1> :
1132      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1133             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
1134                  VMaskOp:$vm, ixlenimm:$frm,
1135                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1136      RISCVVPseudo {
1137  let mayLoad = 0;
1138  let mayStore = 0;
1139  let hasSideEffects = 0;
1140  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1141  let TargetOverlapConstraintType = TargetConstraintType;
1142  let HasVLOp = 1;
1143  let HasSEWOp = 1;
1144  let HasVecPolicyOp = 1;
1145  let UsesMaskPolicy = 1;
1146  let HasRoundModeOp = 1;
1147}
1148
1149class VPseudoUnaryNoMaskGPROut :
1150      Pseudo<(outs GPR:$rd),
1151             (ins VR:$rs2, AVL:$vl, ixlenimm:$sew), []>,
1152      RISCVVPseudo {
1153  let mayLoad = 0;
1154  let mayStore = 0;
1155  let hasSideEffects = 0;
1156  let HasVLOp = 1;
1157  let HasSEWOp = 1;
1158}
1159
1160class VPseudoUnaryMaskGPROut :
1161      Pseudo<(outs GPR:$rd),
1162             (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
1163      RISCVVPseudo {
1164  let mayLoad = 0;
1165  let mayStore = 0;
1166  let hasSideEffects = 0;
1167  let HasVLOp = 1;
1168  let HasSEWOp = 1;
1169}
1170
1171// Mask can be V0~V31
1172class VPseudoUnaryAnyMask<VReg RetClass,
1173                          VReg Op1Class> :
1174      Pseudo<(outs RetClass:$rd),
1175             (ins RetClass:$merge, Op1Class:$rs2,
1176                  VR:$vm, AVL:$vl, ixlenimm:$sew), []>,
1177      RISCVVPseudo {
1178  let mayLoad = 0;
1179  let mayStore = 0;
1180  let hasSideEffects = 0;
1181  let Constraints = "@earlyclobber $rd, $rd = $merge";
1182  let HasVLOp = 1;
1183  let HasSEWOp = 1;
1184}
1185
1186class VPseudoBinaryNoMask<VReg RetClass,
1187                          VReg Op1Class,
1188                          DAGOperand Op2Class,
1189                          string Constraint,
1190                          int TargetConstraintType = 1> :
1191      Pseudo<(outs RetClass:$rd),
1192             (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
1193      RISCVVPseudo {
1194  let mayLoad = 0;
1195  let mayStore = 0;
1196  let hasSideEffects = 0;
1197  let Constraints = Constraint;
1198  let TargetOverlapConstraintType = TargetConstraintType;
1199  let HasVLOp = 1;
1200  let HasSEWOp = 1;
1201}
1202
1203class VPseudoBinaryNoMaskTU<VReg RetClass,
1204                            VReg Op1Class,
1205                            DAGOperand Op2Class,
1206                            string Constraint,
1207                            int TargetConstraintType = 1> :
1208      Pseudo<(outs RetClass:$rd),
1209             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
1210                  ixlenimm:$sew, ixlenimm:$policy), []>,
1211      RISCVVPseudo {
1212  let mayLoad = 0;
1213  let mayStore = 0;
1214  let hasSideEffects = 0;
1215  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1216  let TargetOverlapConstraintType = TargetConstraintType;
1217  let HasVLOp = 1;
1218  let HasSEWOp = 1;
1219  let HasVecPolicyOp = 1;
1220}
1221
1222class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
1223                                      VReg Op1Class,
1224                                      DAGOperand Op2Class,
1225                                      string Constraint,
1226                                      int UsesVXRM_ = 1,
1227                                      int TargetConstraintType = 1> :
1228      Pseudo<(outs RetClass:$rd),
1229             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
1230                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1231      RISCVVPseudo {
1232  let mayLoad = 0;
1233  let mayStore = 0;
1234  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1235  let TargetOverlapConstraintType = TargetConstraintType;
1236  let HasVLOp = 1;
1237  let HasSEWOp = 1;
1238  let HasVecPolicyOp = 1;
1239  let HasRoundModeOp = 1;
1240  let UsesVXRM = UsesVXRM_;
1241}
1242
1243class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
1244                                          RegisterClass Op1Class,
1245                                          DAGOperand Op2Class,
1246                                          string Constraint,
1247                                          int UsesVXRM_,
1248                                          int TargetConstraintType = 1> :
1249      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1250             (ins GetVRegNoV0<RetClass>.R:$merge,
1251                  Op1Class:$rs2, Op2Class:$rs1,
1252                  VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
1253                  ixlenimm:$sew, ixlenimm:$policy), []>,
1254      RISCVVPseudo {
1255  let mayLoad = 0;
1256  let mayStore = 0;
1257  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1258  let TargetOverlapConstraintType = TargetConstraintType;
1259  let HasVLOp = 1;
1260  let HasSEWOp = 1;
1261  let HasVecPolicyOp = 1;
1262  let UsesMaskPolicy = 1;
1263  let HasRoundModeOp = 1;
1264  let UsesVXRM = UsesVXRM_;
1265}
1266
1267// Special version of VPseudoBinaryNoMask where we pretend the first source is
1268// tied to the destination.
1269// This allows maskedoff and rs2 to be the same register.
1270class VPseudoTiedBinaryNoMask<VReg RetClass,
1271                              DAGOperand Op2Class,
1272                              string Constraint,
1273                              int TargetConstraintType = 1> :
1274      Pseudo<(outs RetClass:$rd),
1275             (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew,
1276                  ixlenimm:$policy), []>,
1277      RISCVVPseudo {
1278  let mayLoad = 0;
1279  let mayStore = 0;
1280  let hasSideEffects = 0;
1281  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
1282  let TargetOverlapConstraintType = TargetConstraintType;
1283  let HasVLOp = 1;
1284  let HasSEWOp = 1;
1285  let HasVecPolicyOp = 1;
1286  let isConvertibleToThreeAddress = 1;
1287  let IsTiedPseudo = 1;
1288}
1289
1290class VPseudoTiedBinaryNoMaskRoundingMode<VReg RetClass,
1291                                          DAGOperand Op2Class,
1292                                          string Constraint,
1293                                          int TargetConstraintType = 1> :
1294      Pseudo<(outs RetClass:$rd),
1295             (ins RetClass:$rs2, Op2Class:$rs1,
1296                  ixlenimm:$rm,
1297                  AVL:$vl, ixlenimm:$sew,
1298                  ixlenimm:$policy), []>,
1299      RISCVVPseudo {
1300  let mayLoad = 0;
1301  let mayStore = 0;
1302  let hasSideEffects = 0;
1303  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
1304  let TargetOverlapConstraintType = TargetConstraintType;
1305  let HasVLOp = 1;
1306  let HasSEWOp = 1;
1307  let HasVecPolicyOp = 1;
1308  let isConvertibleToThreeAddress = 1;
1309  let IsTiedPseudo = 1;
1310  let HasRoundModeOp = 1;
1311  let UsesVXRM = 0;
1312}
1313
1314class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
1315                          bit Ordered>:
1316      Pseudo<(outs),
1317             (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
1318                  ixlenimm:$sew),[]>,
1319      RISCVVPseudo,
1320      RISCVVSX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1321  let mayLoad = 0;
1322  let mayStore = 1;
1323  let hasSideEffects = 0;
1324  let HasVLOp = 1;
1325  let HasSEWOp = 1;
1326}
1327
1328class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
1329                        bit Ordered>:
1330      Pseudo<(outs),
1331             (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2,
1332                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
1333      RISCVVPseudo,
1334      RISCVVSX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1335  let mayLoad = 0;
1336  let mayStore = 1;
1337  let hasSideEffects = 0;
1338  let HasVLOp = 1;
1339  let HasSEWOp = 1;
1340}
1341
1342class VPseudoBinaryMask<VReg RetClass,
1343                        RegisterClass Op1Class,
1344                        DAGOperand Op2Class,
1345                        string Constraint> :
1346      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1347             (ins GetVRegNoV0<RetClass>.R:$merge,
1348                  Op1Class:$rs2, Op2Class:$rs1,
1349                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
1350      RISCVVPseudo {
1351  let mayLoad = 0;
1352  let mayStore = 0;
1353  let hasSideEffects = 0;
1354  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1355  let HasVLOp = 1;
1356  let HasSEWOp = 1;
1357}
1358
1359class VPseudoBinaryMaskPolicy<VReg RetClass,
1360                              RegisterClass Op1Class,
1361                              DAGOperand Op2Class,
1362                              string Constraint,
1363                              int TargetConstraintType = 1> :
1364      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1365             (ins GetVRegNoV0<RetClass>.R:$merge,
1366                  Op1Class:$rs2, Op2Class:$rs1,
1367                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1368      RISCVVPseudo {
1369  let mayLoad = 0;
1370  let mayStore = 0;
1371  let hasSideEffects = 0;
1372  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1373  let TargetOverlapConstraintType = TargetConstraintType;
1374  let HasVLOp = 1;
1375  let HasSEWOp = 1;
1376  let HasVecPolicyOp = 1;
1377  let UsesMaskPolicy = 1;
1378}
1379
1380class VPseudoTernaryMaskPolicy<VReg RetClass,
1381                               RegisterClass Op1Class,
1382                               DAGOperand Op2Class,
1383                               string Constraint> :
1384      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1385             (ins GetVRegNoV0<RetClass>.R:$merge,
1386                  Op1Class:$rs2, Op2Class:$rs1,
1387                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1388      RISCVVPseudo {
1389  let mayLoad = 0;
1390  let mayStore = 0;
1391  let hasSideEffects = 0;
1392  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1393  let HasVLOp = 1;
1394  let HasSEWOp = 1;
1395  let HasVecPolicyOp = 1;
1396}
1397
1398class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
1399                                           RegisterClass Op1Class,
1400                                           DAGOperand Op2Class,
1401                                           string Constraint> :
1402      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1403             (ins GetVRegNoV0<RetClass>.R:$merge,
1404                  Op1Class:$rs2, Op2Class:$rs1,
1405                  VMaskOp:$vm,
1406                  ixlenimm:$rm,
1407                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1408      RISCVVPseudo {
1409  let mayLoad = 0;
1410  let mayStore = 0;
1411  let hasSideEffects = 0;
1412  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1413  let HasVLOp = 1;
1414  let HasSEWOp = 1;
1415  let HasVecPolicyOp = 1;
1416  let HasRoundModeOp = 1;
1417  let UsesVXRM = 0;
1418}
1419
1420// Like VPseudoBinaryNoMask, but output can be V0.
1421class VPseudoBinaryMOutNoMask<VReg RetClass,
1422                              VReg Op1Class,
1423                              DAGOperand Op2Class,
1424                              string Constraint,
1425                              int TargetConstraintType = 1> :
1426      Pseudo<(outs RetClass:$rd),
1427             (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
1428      RISCVVPseudo {
1429  let mayLoad = 0;
1430  let mayStore = 0;
1431  let hasSideEffects = 0;
1432  let Constraints = Constraint;
1433  let TargetOverlapConstraintType = TargetConstraintType;
1434  let HasVLOp = 1;
1435  let HasSEWOp = 1;
1436}
1437
1438// Like VPseudoBinaryMask, but output can be V0.
1439class VPseudoBinaryMOutMask<VReg RetClass,
1440                            RegisterClass Op1Class,
1441                            DAGOperand Op2Class,
1442                            string Constraint,
1443                            int TargetConstraintType = 1> :
1444      Pseudo<(outs RetClass:$rd),
1445             (ins RetClass:$merge,
1446                  Op1Class:$rs2, Op2Class:$rs1,
1447                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
1448      RISCVVPseudo {
1449  let mayLoad = 0;
1450  let mayStore = 0;
1451  let hasSideEffects = 0;
1452  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1453  let TargetOverlapConstraintType = TargetConstraintType;
1454  let HasVLOp = 1;
1455  let HasSEWOp = 1;
1456  let UsesMaskPolicy = 1;
1457}
1458
1459// Special version of VPseudoBinaryMask where we pretend the first source is
1460// tied to the destination so we can workaround the earlyclobber constraint.
1461// This allows maskedoff and rs2 to be the same register.
1462class VPseudoTiedBinaryMask<VReg RetClass,
1463                            DAGOperand Op2Class,
1464                            string Constraint,
1465                            int TargetConstraintType = 1> :
1466      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1467             (ins GetVRegNoV0<RetClass>.R:$merge,
1468                  Op2Class:$rs1,
1469                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1470      RISCVVPseudo {
1471  let mayLoad = 0;
1472  let mayStore = 0;
1473  let hasSideEffects = 0;
1474  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1475  let TargetOverlapConstraintType = TargetConstraintType;
1476  let HasVLOp = 1;
1477  let HasSEWOp = 1;
1478  let HasVecPolicyOp = 1;
1479  let UsesMaskPolicy = 1;
1480  let IsTiedPseudo = 1;
1481}
1482
1483class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
1484                                        DAGOperand Op2Class,
1485                                        string Constraint,
1486                                        int TargetConstraintType = 1> :
1487      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1488             (ins GetVRegNoV0<RetClass>.R:$merge,
1489                  Op2Class:$rs1,
1490                  VMaskOp:$vm,
1491                  ixlenimm:$rm,
1492                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1493      RISCVVPseudo {
1494  let mayLoad = 0;
1495  let mayStore = 0;
1496  let hasSideEffects = 0;
1497  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1498  let TargetOverlapConstraintType = TargetConstraintType;
1499  let HasVLOp = 1;
1500  let HasSEWOp = 1;
1501  let HasVecPolicyOp = 1;
1502  let UsesMaskPolicy = 1;
1503  let IsTiedPseudo = 1;
1504  let HasRoundModeOp = 1;
1505  let UsesVXRM = 0;
1506}
1507
1508class VPseudoBinaryCarryIn<VReg RetClass,
1509                           VReg Op1Class,
1510                           DAGOperand Op2Class,
1511                           LMULInfo MInfo,
1512                           bit CarryIn,
1513                           string Constraint,
1514                           int TargetConstraintType = 1> :
1515      Pseudo<(outs RetClass:$rd),
1516             !if(CarryIn,
1517                (ins Op1Class:$rs2, Op2Class:$rs1,
1518                     VMV0:$carry, AVL:$vl, ixlenimm:$sew),
1519                (ins Op1Class:$rs2, Op2Class:$rs1,
1520                     AVL:$vl, ixlenimm:$sew)), []>,
1521      RISCVVPseudo {
1522  let mayLoad = 0;
1523  let mayStore = 0;
1524  let hasSideEffects = 0;
1525  let Constraints = Constraint;
1526  let TargetOverlapConstraintType = TargetConstraintType;
1527  let HasVLOp = 1;
1528  let HasSEWOp = 1;
1529  let VLMul = MInfo.value;
1530}
1531
1532class VPseudoTiedBinaryCarryIn<VReg RetClass,
1533                               VReg Op1Class,
1534                               DAGOperand Op2Class,
1535                               LMULInfo MInfo,
1536                               bit CarryIn,
1537                               string Constraint,
1538                               int TargetConstraintType = 1> :
1539      Pseudo<(outs RetClass:$rd),
1540             !if(CarryIn,
1541                (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
1542                     VMV0:$carry, AVL:$vl, ixlenimm:$sew),
1543                (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
1544                     AVL:$vl, ixlenimm:$sew)), []>,
1545      RISCVVPseudo {
1546  let mayLoad = 0;
1547  let mayStore = 0;
1548  let hasSideEffects = 0;
1549  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
1550  let TargetOverlapConstraintType = TargetConstraintType;
1551  let HasVLOp = 1;
1552  let HasSEWOp = 1;
1553  let HasVecPolicyOp = 0;
1554  let VLMul = MInfo.value;
1555}
1556
1557class VPseudoTernaryNoMask<VReg RetClass,
1558                           RegisterClass Op1Class,
1559                           DAGOperand Op2Class,
1560                           string Constraint> :
1561      Pseudo<(outs RetClass:$rd),
1562             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
1563                  AVL:$vl, ixlenimm:$sew), []>,
1564      RISCVVPseudo {
1565  let mayLoad = 0;
1566  let mayStore = 0;
1567  let hasSideEffects = 0;
1568  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
1569  let HasVLOp = 1;
1570  let HasSEWOp = 1;
1571}
1572
1573class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
1574                                     RegisterClass Op1Class,
1575                                     DAGOperand Op2Class,
1576                                     string Constraint,
1577                                     int TargetConstraintType = 1> :
1578      Pseudo<(outs RetClass:$rd),
1579             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
1580                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1581      RISCVVPseudo {
1582  let mayLoad = 0;
1583  let mayStore = 0;
1584  let hasSideEffects = 0;
1585  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
1586  let TargetOverlapConstraintType = TargetConstraintType;
1587  let HasVecPolicyOp = 1;
1588  let HasVLOp = 1;
1589  let HasSEWOp = 1;
1590}
1591
1592class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
1593                                                 RegisterClass Op1Class,
1594                                                 DAGOperand Op2Class,
1595                                                 string Constraint,
1596                                                 int TargetConstraintType = 1> :
1597      Pseudo<(outs RetClass:$rd),
1598             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
1599                  ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1600      RISCVVPseudo {
1601  let mayLoad = 0;
1602  let mayStore = 0;
1603  let hasSideEffects = 0;
1604  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
1605  let TargetOverlapConstraintType = TargetConstraintType;
1606  let HasVecPolicyOp = 1;
1607  let HasVLOp = 1;
1608  let HasSEWOp = 1;
1609  let HasRoundModeOp = 1;
1610  let UsesVXRM = 0;
1611}
1612
1613class VPseudoUSSegLoadNoMask<VReg RetClass,
1614                             int EEW,
1615                             bits<4> NF> :
1616      Pseudo<(outs RetClass:$rd),
1617             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl,
1618                  ixlenimm:$sew, ixlenimm:$policy), []>,
1619      RISCVVPseudo,
1620      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
1621  let mayLoad = 1;
1622  let mayStore = 0;
1623  let hasSideEffects = 0;
1624  let HasVLOp = 1;
1625  let HasSEWOp = 1;
1626  let HasVecPolicyOp = 1;
1627  let Constraints = "$rd = $dest";
1628}
1629
1630class VPseudoUSSegLoadMask<VReg RetClass,
1631                           int EEW,
1632                           bits<4> NF> :
1633      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1634             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
1635                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
1636      RISCVVPseudo,
1637      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
1638  let mayLoad = 1;
1639  let mayStore = 0;
1640  let hasSideEffects = 0;
1641  let Constraints = "$rd = $merge";
1642  let HasVLOp = 1;
1643  let HasSEWOp = 1;
1644  let HasVecPolicyOp = 1;
1645  let UsesMaskPolicy = 1;
1646}
1647
1648class VPseudoUSSegLoadFFNoMask<VReg RetClass,
1649                               int EEW,
1650                               bits<4> NF> :
1651      Pseudo<(outs RetClass:$rd, GPR:$vl),
1652             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
1653                  ixlenimm:$sew, ixlenimm:$policy), []>,
1654      RISCVVPseudo,
1655      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
1656  let mayLoad = 1;
1657  let mayStore = 0;
1658  let hasSideEffects = 0;
1659  let HasVLOp = 1;
1660  let HasSEWOp = 1;
1661  let HasVecPolicyOp = 1;
1662  let Constraints = "$rd = $dest";
1663}
1664
1665class VPseudoUSSegLoadFFMask<VReg RetClass,
1666                             int EEW,
1667                             bits<4> NF> :
1668      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
1669             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
1670                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
1671      RISCVVPseudo,
1672      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
1673  let mayLoad = 1;
1674  let mayStore = 0;
1675  let hasSideEffects = 0;
1676  let Constraints = "$rd = $merge";
1677  let HasVLOp = 1;
1678  let HasSEWOp = 1;
1679  let HasVecPolicyOp = 1;
1680  let UsesMaskPolicy = 1;
1681}
1682
1683class VPseudoSSegLoadNoMask<VReg RetClass,
1684                            int EEW,
1685                            bits<4> NF> :
1686      Pseudo<(outs RetClass:$rd),
1687             (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl,
1688             ixlenimm:$sew, ixlenimm:$policy), []>,
1689      RISCVVPseudo,
1690      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
1691  let mayLoad = 1;
1692  let mayStore = 0;
1693  let hasSideEffects = 0;
1694  let HasVLOp = 1;
1695  let HasSEWOp = 1;
1696  let HasVecPolicyOp = 1;
1697  let Constraints = "$rd = $merge";
1698}
1699
1700class VPseudoSSegLoadMask<VReg RetClass,
1701                          int EEW,
1702                          bits<4> NF> :
1703      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1704             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
1705                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
1706                  ixlenimm:$policy), []>,
1707      RISCVVPseudo,
1708      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
1709  let mayLoad = 1;
1710  let mayStore = 0;
1711  let hasSideEffects = 0;
1712  let Constraints = "$rd = $merge";
1713  let HasVLOp = 1;
1714  let HasSEWOp = 1;
1715  let HasVecPolicyOp = 1;
1716  let UsesMaskPolicy = 1;
1717}
1718
1719class VPseudoISegLoadNoMask<VReg RetClass,
1720                            VReg IdxClass,
1721                            int EEW,
1722                            bits<3> LMUL,
1723                            bits<4> NF,
1724                            bit Ordered> :
1725      Pseudo<(outs RetClass:$rd),
1726             (ins RetClass:$merge, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
1727                  ixlenimm:$sew, ixlenimm:$policy), []>,
1728      RISCVVPseudo,
1729      RISCVVLXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1730  let mayLoad = 1;
1731  let mayStore = 0;
1732  let hasSideEffects = 0;
1733  // For vector indexed segment loads, the destination vector register groups
1734  // cannot overlap the source vector register group
1735  let Constraints = "@earlyclobber $rd, $rd = $merge";
1736  let HasVLOp = 1;
1737  let HasSEWOp = 1;
1738  let HasVecPolicyOp = 1;
1739}
1740
1741class VPseudoISegLoadMask<VReg RetClass,
1742                          VReg IdxClass,
1743                          int EEW,
1744                          bits<3> LMUL,
1745                          bits<4> NF,
1746                          bit Ordered> :
1747      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1748             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
1749                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
1750                  ixlenimm:$policy), []>,
1751      RISCVVPseudo,
1752      RISCVVLXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1753  let mayLoad = 1;
1754  let mayStore = 0;
1755  let hasSideEffects = 0;
1756  // For vector indexed segment loads, the destination vector register groups
1757  // cannot overlap the source vector register group
1758  let Constraints = "@earlyclobber $rd, $rd = $merge";
1759  let HasVLOp = 1;
1760  let HasSEWOp = 1;
1761  let HasVecPolicyOp = 1;
1762  let UsesMaskPolicy = 1;
1763}
1764
1765class VPseudoUSSegStoreNoMask<VReg ValClass,
1766                              int EEW,
1767                              bits<4> NF> :
1768      Pseudo<(outs),
1769             (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>,
1770      RISCVVPseudo,
1771      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
1772  let mayLoad = 0;
1773  let mayStore = 1;
1774  let hasSideEffects = 0;
1775  let HasVLOp = 1;
1776  let HasSEWOp = 1;
1777}
1778
1779class VPseudoUSSegStoreMask<VReg ValClass,
1780                            int EEW,
1781                            bits<4> NF> :
1782      Pseudo<(outs),
1783             (ins ValClass:$rd, GPRMem:$rs1,
1784                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
1785      RISCVVPseudo,
1786      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
1787  let mayLoad = 0;
1788  let mayStore = 1;
1789  let hasSideEffects = 0;
1790  let HasVLOp = 1;
1791  let HasSEWOp = 1;
1792}
1793
1794class VPseudoSSegStoreNoMask<VReg ValClass,
1795                             int EEW,
1796                             bits<4> NF> :
1797      Pseudo<(outs),
1798             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset,
1799                  AVL:$vl, ixlenimm:$sew), []>,
1800      RISCVVPseudo,
1801      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
1802  let mayLoad = 0;
1803  let mayStore = 1;
1804  let hasSideEffects = 0;
1805  let HasVLOp = 1;
1806  let HasSEWOp = 1;
1807}
1808
1809class VPseudoSSegStoreMask<VReg ValClass,
1810                           int EEW,
1811                           bits<4> NF> :
1812      Pseudo<(outs),
1813             (ins ValClass:$rd, GPRMem:$rs1, GPR: $offset,
1814                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
1815      RISCVVPseudo,
1816      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
1817  let mayLoad = 0;
1818  let mayStore = 1;
1819  let hasSideEffects = 0;
1820  let HasVLOp = 1;
1821  let HasSEWOp = 1;
1822}
1823
1824class VPseudoISegStoreNoMask<VReg ValClass,
1825                             VReg IdxClass,
1826                             int EEW,
1827                             bits<3> LMUL,
1828                             bits<4> NF,
1829                             bit Ordered> :
1830      Pseudo<(outs),
1831             (ins ValClass:$rd, GPRMem:$rs1, IdxClass: $index,
1832                  AVL:$vl, ixlenimm:$sew), []>,
1833      RISCVVPseudo,
1834      RISCVVSXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1835  let mayLoad = 0;
1836  let mayStore = 1;
1837  let hasSideEffects = 0;
1838  let HasVLOp = 1;
1839  let HasSEWOp = 1;
1840}
1841
1842class VPseudoISegStoreMask<VReg ValClass,
1843                           VReg IdxClass,
1844                           int EEW,
1845                           bits<3> LMUL,
1846                           bits<4> NF,
1847                           bit Ordered> :
1848      Pseudo<(outs),
1849             (ins ValClass:$rd, GPRMem:$rs1, IdxClass: $index,
1850                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
1851      RISCVVPseudo,
1852      RISCVVSXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1853  let mayLoad = 0;
1854  let mayStore = 1;
1855  let hasSideEffects = 0;
1856  let HasVLOp = 1;
1857  let HasSEWOp = 1;
1858}
1859
1860multiclass VPseudoUSLoad {
1861  foreach eew = EEWList in {
1862    foreach lmul = MxSet<eew>.m in {
1863      defvar LInfo = lmul.MX;
1864      defvar vreg = lmul.vrclass;
1865      let VLMul = lmul.value, SEW=eew in {
1866        def "E" # eew # "_V_" # LInfo :
1867          VPseudoUSLoadNoMask<vreg, eew>,
1868          VLESched<LInfo>;
1869        def "E" # eew # "_V_" # LInfo # "_MASK" :
1870          VPseudoUSLoadMask<vreg, eew>,
1871          RISCVMaskedPseudo<MaskIdx=2>,
1872          VLESched<LInfo>;
1873      }
1874    }
1875  }
1876}
1877
1878multiclass VPseudoFFLoad {
1879  foreach eew = EEWList in {
1880    foreach lmul = MxSet<eew>.m in {
1881      defvar LInfo = lmul.MX;
1882      defvar vreg = lmul.vrclass;
1883      let VLMul = lmul.value, SEW=eew in {
1884        def "E" # eew # "FF_V_" # LInfo:
1885          VPseudoUSLoadFFNoMask<vreg, eew>,
1886          VLFSched<LInfo>;
1887        def "E" # eew # "FF_V_" # LInfo # "_MASK":
1888          VPseudoUSLoadFFMask<vreg, eew>,
1889          RISCVMaskedPseudo<MaskIdx=2>,
1890          VLFSched<LInfo>;
1891      }
1892    }
1893  }
1894}
1895
1896multiclass VPseudoLoadMask {
1897  foreach mti = AllMasks in {
1898    defvar mx = mti.LMul.MX;
1899    defvar WriteVLDM_MX = !cast<SchedWrite>("WriteVLDM_" # mx);
1900    let VLMul = mti.LMul.value in {
1901      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, EEW=1>,
1902        Sched<[WriteVLDM_MX, ReadVLDX]>;
1903    }
1904  }
1905}
1906
1907multiclass VPseudoSLoad {
1908  foreach eew = EEWList in {
1909    foreach lmul = MxSet<eew>.m in {
1910      defvar LInfo = lmul.MX;
1911      defvar vreg = lmul.vrclass;
1912      let VLMul = lmul.value, SEW=eew in {
1913        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
1914                                        VLSSched<eew, LInfo>;
1915        def "E" # eew # "_V_" # LInfo # "_MASK" :
1916          VPseudoSLoadMask<vreg, eew>,
1917          RISCVMaskedPseudo<MaskIdx=3>,
1918          VLSSched<eew, LInfo>;
1919      }
1920    }
1921  }
1922}
1923
1924multiclass VPseudoILoad<bit Ordered> {
1925  foreach idxEEW = EEWList in {
1926    foreach dataEEW = EEWList in {
1927      foreach dataEMUL = MxSet<dataEEW>.m in {
1928        defvar dataEMULOctuple = dataEMUL.octuple;
1929        // Calculate emul = eew * lmul / sew
1930        defvar idxEMULOctuple =
1931          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
1932        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
1933          defvar DataLInfo = dataEMUL.MX;
1934          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
1935          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
1936          defvar Vreg = dataEMUL.vrclass;
1937          defvar IdxVreg = idxEMUL.vrclass;
1938          defvar HasConstraint = !ne(dataEEW, idxEEW);
1939          defvar TypeConstraints =
1940            !if(!eq(dataEEW, idxEEW), 1, !if(!gt(dataEEW, idxEEW), !if(!ge(idxEMULOctuple, 8), 3, 1), 2));
1941          let VLMul = dataEMUL.value in {
1942            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
1943              VPseudoILoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint, TypeConstraints>,
1944              VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
1945            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
1946              VPseudoILoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint, TypeConstraints>,
1947              RISCVMaskedPseudo<MaskIdx=3>,
1948              VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
1949          }
1950        }
1951      }
1952    }
1953  }
1954}
1955
1956multiclass VPseudoUSStore {
1957  foreach eew = EEWList in {
1958    foreach lmul = MxSet<eew>.m in {
1959      defvar LInfo = lmul.MX;
1960      defvar vreg = lmul.vrclass;
1961      let VLMul = lmul.value, SEW=eew in {
1962        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
1963                                        VSESched<LInfo>;
1964        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
1965                                                  VSESched<LInfo>;
1966      }
1967    }
1968  }
1969}
1970
1971multiclass VPseudoStoreMask {
1972  foreach mti = AllMasks in {
1973    defvar mx = mti.LMul.MX;
1974    defvar WriteVSTM_MX = !cast<SchedWrite>("WriteVSTM_" # mx);
1975    let VLMul = mti.LMul.value in {
1976      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, EEW=1>,
1977        Sched<[WriteVSTM_MX, ReadVSTX]>;
1978    }
1979  }
1980}
1981
1982multiclass VPseudoSStore {
1983  foreach eew = EEWList in {
1984    foreach lmul = MxSet<eew>.m in {
1985      defvar LInfo = lmul.MX;
1986      defvar vreg = lmul.vrclass;
1987      let VLMul = lmul.value, SEW=eew in {
1988        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
1989                                        VSSSched<eew, LInfo>;
1990        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
1991                                                  VSSSched<eew, LInfo>;
1992      }
1993    }
1994  }
1995}
1996
1997multiclass VPseudoIStore<bit Ordered> {
1998  foreach idxEEW = EEWList in {
1999    foreach dataEEW = EEWList in {
2000      foreach dataEMUL = MxSet<dataEEW>.m in {
2001        defvar dataEMULOctuple = dataEMUL.octuple;
2002        // Calculate emul = eew * lmul / sew
2003        defvar idxEMULOctuple =
2004          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
2005        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
2006          defvar DataLInfo = dataEMUL.MX;
2007          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
2008          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
2009          defvar Vreg = dataEMUL.vrclass;
2010          defvar IdxVreg = idxEMUL.vrclass;
2011          let VLMul = dataEMUL.value in {
2012            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
2013              VPseudoIStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
2014              VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
2015            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
2016              VPseudoIStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
2017              VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
2018          }
2019        }
2020      }
2021    }
2022  }
2023}
2024
2025multiclass VPseudoVPOP_M {
2026  foreach mti = AllMasks in {
2027    defvar mx = mti.LMul.MX;
2028    let VLMul = mti.LMul.value in {
2029      def "_M_" # mti.BX : VPseudoUnaryNoMaskGPROut,
2030          SchedBinary<"WriteVMPopV", "ReadVMPopV", "ReadVMPopV", mx>;
2031      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
2032          SchedBinary<"WriteVMPopV", "ReadVMPopV", "ReadVMPopV", mx>;
2033    }
2034  }
2035}
2036
2037multiclass VPseudoV1ST_M {
2038  foreach mti = AllMasks in {
2039    defvar mx = mti.LMul.MX;
2040    let VLMul = mti.LMul.value in {
2041      def "_M_" #mti.BX : VPseudoUnaryNoMaskGPROut,
2042          SchedBinary<"WriteVMFFSV", "ReadVMFFSV", "ReadVMFFSV", mx>;
2043      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
2044          SchedBinary<"WriteVMFFSV", "ReadVMFFSV", "ReadVMFFSV", mx>;
2045    }
2046  }
2047}
2048
2049multiclass VPseudoVSFS_M {
2050  defvar constraint = "@earlyclobber $rd";
2051  foreach mti = AllMasks in {
2052    defvar mx = mti.LMul.MX;
2053    let VLMul = mti.LMul.value in {
2054      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>,
2055                           SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
2056                                      forceMergeOpRead=true>;
2057      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
2058                                     SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
2059                                                forceMergeOpRead=true>;
2060    }
2061  }
2062}
2063
2064multiclass VPseudoVID_V {
2065  foreach m = MxList in {
2066    defvar mx = m.MX;
2067    let VLMul = m.value in {
2068      def "_V_" # mx : VPseudoNullaryNoMask<m.vrclass>,
2069                         SchedNullary<"WriteVMIdxV", mx, forceMergeOpRead=true>;
2070      def "_V_" # mx # "_MASK" : VPseudoNullaryMask<m.vrclass>,
2071                                   RISCVMaskedPseudo<MaskIdx=1>,
2072                                   SchedNullary<"WriteVMIdxV", mx,
2073                                                forceMergeOpRead=true>;
2074    }
2075  }
2076}
2077
2078multiclass VPseudoNullaryPseudoM <string BaseInst> {
2079  foreach mti = AllMasks in {
2080    let VLMul = mti.LMul.value in {
2081      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">,
2082        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mti.LMul.MX>;
2083    }
2084  }
2085}
2086
2087multiclass VPseudoVIOT_M {
2088  defvar constraint = "@earlyclobber $rd";
2089  foreach m = MxList in {
2090    defvar mx = m.MX;
2091    let VLMul = m.value in {
2092      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
2093                       SchedUnary<"WriteVMIotV", "ReadVMIotV", mx,
2094                                  forceMergeOpRead=true>;
2095      def "_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
2096                                 RISCVMaskedPseudo<MaskIdx=2, MaskAffectsRes=true>,
2097                                 SchedUnary<"WriteVMIotV", "ReadVMIotV", mx,
2098                                            forceMergeOpRead=true>;
2099    }
2100  }
2101}
2102
2103multiclass VPseudoVCPR_V {
2104  foreach m = MxList in {
2105    defvar mx = m.MX;
2106    defvar sews = SchedSEWSet<mx>.val;
2107    let VLMul = m.value in
2108      foreach e = sews in {
2109        defvar suffix = "_" # m.MX # "_E" # e;
2110        let SEW = e in
2111        def _VM # suffix
2112          : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
2113            SchedBinary<"WriteVCompressV", "ReadVCompressV", "ReadVCompressV",
2114                        mx, e>;
2115      }
2116  }
2117}
2118
2119multiclass VPseudoBinary<VReg RetClass,
2120                         VReg Op1Class,
2121                         DAGOperand Op2Class,
2122                         LMULInfo MInfo,
2123                         string Constraint = "",
2124                         int sew = 0,
2125                         int TargetConstraintType = 1> {
2126  let VLMul = MInfo.value, SEW=sew in {
2127    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
2128    def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
2129                                       Constraint, TargetConstraintType>;
2130    def suffix # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
2131                                                   Constraint, TargetConstraintType>,
2132                           RISCVMaskedPseudo<MaskIdx=3>;
2133  }
2134}
2135
2136multiclass VPseudoBinaryNoMask<VReg RetClass,
2137                               VReg Op1Class,
2138                               DAGOperand Op2Class,
2139                               LMULInfo MInfo,
2140                               string Constraint = "",
2141                               int sew = 0> {
2142  let VLMul = MInfo.value, SEW=sew in {
2143    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
2144    def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
2145                                       Constraint>;
2146  }
2147}
2148
2149multiclass VPseudoBinaryRoundingMode<VReg RetClass,
2150                                     VReg Op1Class,
2151                                     DAGOperand Op2Class,
2152                                     LMULInfo MInfo,
2153                                     string Constraint = "",
2154                                     int sew = 0,
2155                                     int UsesVXRM = 1,
2156                                     int TargetConstraintType = 1> {
2157  let VLMul = MInfo.value, SEW=sew in {
2158    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
2159    def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
2160                                                 Constraint, UsesVXRM,
2161                                                 TargetConstraintType>;
2162    def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode<RetClass,
2163                                                               Op1Class,
2164                                                               Op2Class,
2165                                                               Constraint,
2166                                                               UsesVXRM,
2167                                                               TargetConstraintType>,
2168                           RISCVMaskedPseudo<MaskIdx=3>;
2169  }
2170}
2171
2172
2173multiclass VPseudoBinaryM<VReg RetClass,
2174                          VReg Op1Class,
2175                          DAGOperand Op2Class,
2176                          LMULInfo MInfo,
2177                          string Constraint = "",
2178                          int TargetConstraintType = 1> {
2179  let VLMul = MInfo.value in {
2180    def "_" # MInfo.MX : VPseudoBinaryMOutNoMask<RetClass, Op1Class, Op2Class,
2181                                                 Constraint, TargetConstraintType>;
2182    let ForceTailAgnostic = true in
2183    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
2184                                                         Op2Class, Constraint, TargetConstraintType>,
2185                                   RISCVMaskedPseudo<MaskIdx=3>;
2186  }
2187}
2188
2189multiclass VPseudoBinaryEmul<VReg RetClass,
2190                             VReg Op1Class,
2191                             DAGOperand Op2Class,
2192                             LMULInfo lmul,
2193                             LMULInfo emul,
2194                             string Constraint = "",
2195                             int sew = 0> {
2196  let VLMul = lmul.value, SEW=sew in {
2197    defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
2198    def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
2199                                                       Constraint>;
2200    def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
2201                                                                          Constraint>,
2202                                                  RISCVMaskedPseudo<MaskIdx=3>;
2203  }
2204}
2205
2206multiclass VPseudoTiedBinary<VReg RetClass,
2207                             DAGOperand Op2Class,
2208                             LMULInfo MInfo,
2209                             string Constraint = "",
2210                             int TargetConstraintType = 1> {
2211  let VLMul = MInfo.value in {
2212    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
2213                                                          Constraint, TargetConstraintType>;
2214    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
2215                                                         Constraint, TargetConstraintType>;
2216  }
2217}
2218
2219multiclass VPseudoTiedBinaryRoundingMode<VReg RetClass,
2220                                         DAGOperand Op2Class,
2221                                         LMULInfo MInfo,
2222                                         string Constraint = "",
2223                                         int TargetConstraintType = 1> {
2224    let VLMul = MInfo.value in {
2225    def "_" # MInfo.MX # "_TIED":
2226      VPseudoTiedBinaryNoMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>;
2227    def "_" # MInfo.MX # "_MASK_TIED" :
2228      VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>;
2229  }
2230}
2231
2232
2233multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0> {
2234  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew>;
2235}
2236
2237multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = ""> {
2238  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
2239}
2240
2241// Similar to VPseudoBinaryV_VV, but uses MxListF.
2242multiclass VPseudoBinaryFV_VV<LMULInfo m, string Constraint = "", int sew = 0> {
2243  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew>;
2244}
2245
2246multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, string Constraint = "", int sew = 0> {
2247  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
2248                                       Constraint, sew,
2249                                       UsesVXRM=0>;
2250}
2251
2252multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
2253  foreach m = MxList in {
2254    defvar mx = m.MX;
2255    foreach sew = EEWList in {
2256      defvar dataEMULOctuple = m.octuple;
2257      // emul = lmul * eew / sew
2258      defvar idxEMULOctuple = !srl(!mul(dataEMULOctuple, eew), !logtwo(sew));
2259      if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
2260        defvar emulMX = octuple_to_str<idxEMULOctuple>.ret;
2261        defvar emul = !cast<LMULInfo>("V_" # emulMX);
2262        defvar sews = SchedSEWSet<mx>.val;
2263        foreach e = sews in {
2264          defm _VV
2265              : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul,
2266                                  Constraint, e>,
2267                SchedBinary<"WriteVRGatherVV", "ReadVRGatherVV_data",
2268                            "ReadVRGatherVV_index", mx, e, forceMergeOpRead=true>;
2269        }
2270      }
2271    }
2272  }
2273}
2274
2275multiclass VPseudoBinaryV_VX<LMULInfo m, string Constraint = "", int sew = 0> {
2276  defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint, sew>;
2277}
2278
2279multiclass VPseudoBinaryV_VX_RM<LMULInfo m, string Constraint = ""> {
2280  defm "_VX" : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, GPR, m, Constraint>;
2281}
2282
2283multiclass VPseudoVSLD1_VX<string Constraint = ""> {
2284  foreach m = MxList in {
2285    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
2286                 SchedBinary<"WriteVISlide1X", "ReadVISlideV", "ReadVISlideX",
2287                             m.MX, forceMergeOpRead=true>;
2288  }
2289}
2290
2291multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, string Constraint = "", int sew = 0> {
2292  defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
2293                                   f.fprclass, m, Constraint, sew>;
2294}
2295
2296multiclass VPseudoBinaryV_VF_RM<LMULInfo m, FPR_Info f, string Constraint = "", int sew = 0> {
2297  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass,
2298                                               f.fprclass, m, Constraint, sew,
2299                                               UsesVXRM=0>;
2300}
2301
2302multiclass VPseudoVSLD1_VF<string Constraint = ""> {
2303  foreach f = FPList in {
2304    foreach m = f.MxList in {
2305      defm "_V" #f.FX
2306          : VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
2307            SchedBinary<"WriteVFSlide1F", "ReadVFSlideV", "ReadVFSlideF", m.MX,
2308                      forceMergeOpRead=true>;
2309    }
2310  }
2311}
2312
2313multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
2314  defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
2315}
2316
2317multiclass VPseudoBinaryV_VI_RM<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
2318  defm _VI : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, ImmType, m, Constraint>;
2319}
2320
2321multiclass VPseudoVALU_MM<bit Commutable = 0> {
2322  foreach m = MxList in {
2323    defvar mx = m.MX;
2324    let VLMul = m.value, isCommutable = Commutable in {
2325      def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
2326                        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mx>;
2327    }
2328  }
2329}
2330
2331// We use earlyclobber here due to
2332// * The destination EEW is smaller than the source EEW and the overlap is
2333//   in the lowest-numbered part of the source register group is legal.
2334//   Otherwise, it is illegal.
2335// * The destination EEW is greater than the source EEW, the source EMUL is
2336//   at least 1, and the overlap is in the highest-numbered part of the
2337//   destination register group is legal. Otherwise, it is illegal.
2338multiclass VPseudoBinaryW_VV<LMULInfo m> {
2339  defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
2340                           "@earlyclobber $rd", TargetConstraintType=3>;
2341}
2342
2343multiclass VPseudoBinaryW_VV_RM<LMULInfo m> {
2344  defm _VV : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
2345                                      "@earlyclobber $rd",  UsesVXRM=0,
2346                                      TargetConstraintType=3>;
2347}
2348
2349multiclass VPseudoBinaryW_VX<LMULInfo m> {
2350  defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
2351                             "@earlyclobber $rd", TargetConstraintType=3>;
2352}
2353
2354multiclass VPseudoBinaryW_VI<Operand ImmType, LMULInfo m> {
2355  defm "_VI" : VPseudoBinary<m.wvrclass, m.vrclass, ImmType, m,
2356                             "@earlyclobber $rd", TargetConstraintType=3>;
2357}
2358
2359multiclass VPseudoBinaryW_VF<LMULInfo m, FPR_Info f> {
2360  defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
2361                                   f.fprclass, m,
2362                                   "@earlyclobber $rd">;
2363}
2364
2365multiclass VPseudoBinaryW_VF_RM<LMULInfo m, FPR_Info f> {
2366  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass,
2367                                               f.fprclass, m,
2368                                               "@earlyclobber $rd",
2369                                               UsesVXRM=0,
2370                                               TargetConstraintType=3>;
2371}
2372
2373multiclass VPseudoBinaryW_WV<LMULInfo m> {
2374  defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
2375                           "@earlyclobber $rd", TargetConstraintType=3>;
2376  defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
2377                               "@earlyclobber $rd", TargetConstraintType=3>;
2378}
2379
2380multiclass VPseudoBinaryW_WV_RM<LMULInfo m> {
2381  defm _WV : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass, m.vrclass, m,
2382                                       "@earlyclobber $rd", UsesVXRM=0, TargetConstraintType=3>;
2383  defm _WV : VPseudoTiedBinaryRoundingMode<m.wvrclass, m.vrclass, m,
2384                                           "@earlyclobber $rd", TargetConstraintType=3>;
2385}
2386
2387multiclass VPseudoBinaryW_WX<LMULInfo m> {
2388  defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m, /*Constraint*/ "", TargetConstraintType=3>;
2389}
2390
2391multiclass VPseudoBinaryW_WF<LMULInfo m, FPR_Info f, int TargetConstraintType = 1> {
2392  defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
2393                                   f.fprclass, m, /*Constraint*/ "", TargetConstraintType=TargetConstraintType>;
2394}
2395
2396multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f> {
2397  defm "_W" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass,
2398                                               f.fprclass, m,
2399                                               Constraint="",
2400                                               sew=0,
2401                                               UsesVXRM=0,
2402                                               TargetConstraintType=3>;
2403}
2404
2405// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
2406// if the source and destination have an LMUL<=1. This matches this overlap
2407// exception from the spec.
2408// "The destination EEW is smaller than the source EEW and the overlap is in the
2409//  lowest-numbered part of the source register group."
2410multiclass VPseudoBinaryV_WV<LMULInfo m, int TargetConstraintType = 1> {
2411  defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
2412                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>;
2413}
2414
2415multiclass VPseudoBinaryV_WV_RM<LMULInfo m> {
2416  defm _WV : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, m.vrclass, m,
2417                                       !if(!ge(m.octuple, 8),
2418                                       "@earlyclobber $rd", "")>;
2419}
2420
2421multiclass VPseudoBinaryV_WX<LMULInfo m, int TargetConstraintType = 1> {
2422  defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
2423                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>;
2424}
2425
2426multiclass VPseudoBinaryV_WX_RM<LMULInfo m> {
2427  defm _WX : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, GPR, m,
2428                                       !if(!ge(m.octuple, 8),
2429                                       "@earlyclobber $rd", "")>;
2430}
2431
2432multiclass VPseudoBinaryV_WI<LMULInfo m, int TargetConstraintType = 1> {
2433  defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
2434                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>;
2435}
2436
2437multiclass VPseudoBinaryV_WI_RM<LMULInfo m> {
2438  defm _WI : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, uimm5, m,
2439                                       !if(!ge(m.octuple, 8),
2440                                       "@earlyclobber $rd", "")>;
2441}
2442
2443// For vadc and vsbc, the instruction encoding is reserved if the destination
2444// vector register is v0.
2445// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
2446multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
2447                             string Constraint = "",
2448                             bit Commutable = 0,
2449                             int TargetConstraintType = 1> {
2450  let isCommutable = Commutable in
2451  def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
2452    VPseudoBinaryCarryIn<!if(CarryOut, VR,
2453                         !if(!and(CarryIn, !not(CarryOut)),
2454                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2455                         m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>;
2456}
2457
2458multiclass VPseudoTiedBinaryV_VM<LMULInfo m, int TargetConstraintType = 1> {
2459  def "_VVM" # "_" # m.MX:
2460    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2461                             m.vrclass, m.vrclass, m, 1, "",
2462                             TargetConstraintType>;
2463}
2464
2465multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
2466                             string Constraint = "", int TargetConstraintType = 1> {
2467  def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
2468    VPseudoBinaryCarryIn<!if(CarryOut, VR,
2469                         !if(!and(CarryIn, !not(CarryOut)),
2470                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2471                         m.vrclass, GPR, m, CarryIn, Constraint, TargetConstraintType>;
2472}
2473
2474multiclass VPseudoTiedBinaryV_XM<LMULInfo m, int TargetConstraintType = 1> {
2475  def "_VXM" # "_" # m.MX:
2476    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2477                             m.vrclass, GPR, m, 1, "",
2478                             TargetConstraintType>;
2479}
2480
2481multiclass VPseudoVMRG_FM {
2482  foreach f = FPList in {
2483    foreach m = f.MxList in {
2484      defvar mx = m.MX;
2485      def "_V" # f.FX # "M_" # mx
2486          : VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R, m.vrclass,
2487                                     f.fprclass, m, CarryIn=1,
2488                                     Constraint = "">,
2489          SchedBinary<"WriteVFMergeV", "ReadVFMergeV", "ReadVFMergeF", mx,
2490                      forceMasked=1, forceMergeOpRead=true>;
2491    }
2492  }
2493}
2494
2495multiclass VPseudoBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
2496                             string Constraint = "", int TargetConstraintType = 1> {
2497  def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
2498    VPseudoBinaryCarryIn<!if(CarryOut, VR,
2499                         !if(!and(CarryIn, !not(CarryOut)),
2500                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2501                         m.vrclass, simm5, m, CarryIn, Constraint, TargetConstraintType>;
2502}
2503
2504multiclass VPseudoTiedBinaryV_IM<LMULInfo m> {
2505  def "_VIM" # "_" # m.MX:
2506    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2507                             m.vrclass, simm5, m, 1, "">;
2508}
2509
2510multiclass VPseudoUnaryVMV_V_X_I {
2511  foreach m = MxList in {
2512    let VLMul = m.value in {
2513      defvar mx = m.MX;
2514      let VLMul = m.value in {
2515        def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2516                         SchedUnary<"WriteVIMovV", "ReadVIMovV", mx,
2517                                    forceMergeOpRead=true>;
2518        def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
2519                         SchedUnary<"WriteVIMovX", "ReadVIMovX", mx,
2520                                    forceMergeOpRead=true>;
2521        def "_I_" # mx : VPseudoUnaryNoMask<m.vrclass, simm5>,
2522                         SchedNullary<"WriteVIMovI", mx,
2523                                      forceMergeOpRead=true>;
2524      }
2525    }
2526  }
2527}
2528
2529multiclass VPseudoVMV_F {
2530  foreach f = FPList in {
2531    foreach m = f.MxList in {
2532      defvar mx = m.MX;
2533      let VLMul = m.value in {
2534        def "_" # f.FX # "_" # mx :
2535          VPseudoUnaryNoMask<m.vrclass, f.fprclass>,
2536          SchedUnary<"WriteVFMovV", "ReadVFMovF", mx, forceMergeOpRead=true>;
2537      }
2538    }
2539  }
2540}
2541
2542multiclass VPseudoVCLS_V {
2543  foreach m = MxListF in {
2544    defvar mx = m.MX;
2545    let VLMul = m.value in {
2546      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2547                       SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
2548                                  forceMergeOpRead=true>;
2549      def "_V_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
2550                                 RISCVMaskedPseudo<MaskIdx=2>,
2551                                 SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
2552                                            forceMergeOpRead=true>;
2553    }
2554  }
2555}
2556
2557multiclass VPseudoVSQR_V_RM {
2558  foreach m = MxListF in {
2559    defvar mx = m.MX;
2560    defvar sews = SchedSEWSet<m.MX, isF=1>.val;
2561
2562    let VLMul = m.value in
2563      foreach e = sews in {
2564        defvar suffix = "_" # mx # "_E" # e;
2565        let SEW = e in {
2566          def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
2567                              SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
2568                                         forceMergeOpRead=true>;
2569          def "_V" #suffix # "_MASK"
2570              : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
2571                RISCVMaskedPseudo<MaskIdx = 2>,
2572                SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
2573                           forceMergeOpRead=true>;
2574        }
2575      }
2576  }
2577}
2578
2579multiclass VPseudoVRCP_V {
2580  foreach m = MxListF in {
2581    defvar mx = m.MX;
2582    let VLMul = m.value in {
2583      def "_V_" # mx
2584          : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2585            SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, forceMergeOpRead=true>;
2586      def "_V_" # mx # "_MASK"
2587          : VPseudoUnaryMask<m.vrclass, m.vrclass>,
2588            RISCVMaskedPseudo<MaskIdx = 2>,
2589            SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, forceMergeOpRead=true>;
2590    }
2591  }
2592}
2593
2594multiclass VPseudoVRCP_V_RM {
2595  foreach m = MxListF in {
2596    defvar mx = m.MX;
2597    let VLMul = m.value in {
2598      def "_V_" # mx
2599          : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
2600            SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, forceMergeOpRead=true>;
2601      def "_V_" # mx # "_MASK"
2602          : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
2603            RISCVMaskedPseudo<MaskIdx = 2>,
2604            SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, forceMergeOpRead=true>;
2605    }
2606  }
2607}
2608
2609multiclass PseudoVEXT_VF2 {
2610  defvar constraints = "@earlyclobber $rd";
2611  foreach m = MxListVF2 in {
2612    defvar mx = m.MX;
2613    defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF4"), !eq(mx, "MF2"), !eq(mx, "M1")), 1, 3);
2614    let VLMul = m.value in {
2615      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
2616                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
2617      def "_" # mx # "_MASK" :
2618        VPseudoUnaryMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
2619        RISCVMaskedPseudo<MaskIdx=2>,
2620        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
2621    }
2622  }
2623}
2624
2625multiclass PseudoVEXT_VF4 {
2626  defvar constraints = "@earlyclobber $rd";
2627  foreach m = MxListVF4 in {
2628    defvar mx = m.MX;
2629    defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF2"), !eq(mx, "M1"), !eq(mx, "M2")), 1, 3);
2630    let VLMul = m.value in {
2631      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
2632                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
2633      def "_" # mx # "_MASK" :
2634        VPseudoUnaryMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
2635        RISCVMaskedPseudo<MaskIdx=2>,
2636        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
2637    }
2638  }
2639}
2640
2641multiclass PseudoVEXT_VF8 {
2642  defvar constraints = "@earlyclobber $rd";
2643  foreach m = MxListVF8 in {
2644    defvar mx = m.MX;
2645    defvar CurrTypeConstraints = !if(!or(!eq(mx, "M1"), !eq(mx, "M2"), !eq(mx, "M4")), 1, 3);
2646    let VLMul = m.value in {
2647      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
2648                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
2649      def "_" # mx # "_MASK" :
2650        VPseudoUnaryMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
2651        RISCVMaskedPseudo<MaskIdx=2>,
2652        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
2653    }
2654  }
2655}
2656
2657// The destination EEW is 1 since "For the purposes of register group overlap
2658// constraints, mask elements have EEW=1."
2659// The source EEW is 8, 16, 32, or 64.
2660// When the destination EEW is different from source EEW, we need to use
2661// @earlyclobber to avoid the overlap between destination and source registers.
2662// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
2663// exception from the spec
2664// "The destination EEW is smaller than the source EEW and the overlap is in the
2665//  lowest-numbered part of the source register group".
2666// With LMUL<=1 the source and dest occupy a single register so any overlap
2667// is in the lowest-numbered part.
2668multiclass VPseudoBinaryM_VV<LMULInfo m, int TargetConstraintType = 1> {
2669  defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
2670                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
2671}
2672
2673multiclass VPseudoBinaryM_VX<LMULInfo m, int TargetConstraintType = 1> {
2674  defm "_VX" :
2675    VPseudoBinaryM<VR, m.vrclass, GPR, m,
2676                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
2677}
2678
2679multiclass VPseudoBinaryM_VF<LMULInfo m, FPR_Info f, int TargetConstraintType = 1> {
2680  defm "_V" # f.FX :
2681    VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
2682                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
2683}
2684
2685multiclass VPseudoBinaryM_VI<LMULInfo m, int TargetConstraintType = 1> {
2686  defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
2687                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
2688}
2689
2690multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2691  foreach m = MxList in {
2692    defvar mx = m.MX;
2693    defm "" : VPseudoBinaryV_VX<m, Constraint>,
2694              SchedBinary<"WriteVRGatherVX", "ReadVRGatherVX_data",
2695                          "ReadVRGatherVX_index", mx, forceMergeOpRead=true>;
2696    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
2697              SchedUnary<"WriteVRGatherVI", "ReadVRGatherVI_data", mx,
2698                         forceMergeOpRead=true>;
2699
2700    defvar sews = SchedSEWSet<mx>.val;
2701    foreach e = sews in {
2702      defm "" : VPseudoBinaryV_VV<m, Constraint, e>,
2703                SchedBinary<"WriteVRGatherVV", "ReadVRGatherVV_data",
2704                              "ReadVRGatherVV_index", mx, e, forceMergeOpRead=true>;
2705    }
2706  }
2707}
2708
2709multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2710  foreach m = MxList in {
2711    defvar mx = m.MX;
2712    defm "" : VPseudoBinaryV_VV<m, Constraint>,
2713              SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUX", mx,
2714                          forceMergeOpRead=true>;
2715    defm "" : VPseudoBinaryV_VX<m, Constraint>,
2716              SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
2717                          forceMergeOpRead=true>;
2718    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
2719              SchedUnary<"WriteVSALUI", "ReadVSALUV", mx, forceMergeOpRead=true>;
2720  }
2721}
2722
2723
2724multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2725  foreach m = MxList in {
2726    defvar mx = m.MX;
2727    defm "" : VPseudoBinaryV_VV<m, Constraint>,
2728              SchedBinary<"WriteVShiftV", "ReadVShiftV", "ReadVShiftV", mx,
2729                          forceMergeOpRead=true>;
2730    defm "" : VPseudoBinaryV_VX<m, Constraint>,
2731              SchedBinary<"WriteVShiftX", "ReadVShiftV", "ReadVShiftX", mx,
2732                          forceMergeOpRead=true>;
2733    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
2734              SchedUnary<"WriteVShiftI", "ReadVShiftV", mx, forceMergeOpRead=true>;
2735  }
2736}
2737
2738multiclass VPseudoVSSHT_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint = ""> {
2739  foreach m = MxList in {
2740    defvar mx = m.MX;
2741    defm "" : VPseudoBinaryV_VV_RM<m, Constraint>,
2742              SchedBinary<"WriteVSShiftV", "ReadVSShiftV", "ReadVSShiftV", mx,
2743                          forceMergeOpRead=true>;
2744    defm "" : VPseudoBinaryV_VX_RM<m, Constraint>,
2745              SchedBinary<"WriteVSShiftX", "ReadVSShiftV", "ReadVSShiftX", mx,
2746                          forceMergeOpRead=true>;
2747    defm "" : VPseudoBinaryV_VI_RM<ImmType, m, Constraint>,
2748              SchedUnary<"WriteVSShiftI", "ReadVSShiftV", mx, forceMergeOpRead=true>;
2749  }
2750}
2751
2752multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
2753  foreach m = MxList in {
2754    defvar mx = m.MX;
2755    defm "" : VPseudoBinaryV_VV<m, Constraint>,
2756            SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", mx,
2757                        forceMergeOpRead=true>;
2758    defm "" : VPseudoBinaryV_VX<m, Constraint>,
2759            SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
2760                        forceMergeOpRead=true>;
2761    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
2762            SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
2763  }
2764}
2765
2766multiclass VPseudoVSALU_VV_VX {
2767  foreach m = MxList in {
2768    defvar mx = m.MX;
2769    defm "" : VPseudoBinaryV_VV<m>,
2770              SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUV", mx,
2771                          forceMergeOpRead=true>;
2772    defm "" : VPseudoBinaryV_VX<m>,
2773              SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
2774                          forceMergeOpRead=true>;
2775  }
2776}
2777
2778multiclass VPseudoVSMUL_VV_VX_RM {
2779  foreach m = MxList in {
2780    defvar mx = m.MX;
2781    defm "" : VPseudoBinaryV_VV_RM<m>,
2782              SchedBinary<"WriteVSMulV", "ReadVSMulV", "ReadVSMulV", mx,
2783                          forceMergeOpRead=true>;
2784    defm "" : VPseudoBinaryV_VX_RM<m>,
2785              SchedBinary<"WriteVSMulX", "ReadVSMulV", "ReadVSMulX", mx,
2786                          forceMergeOpRead=true>;
2787  }
2788}
2789
2790multiclass VPseudoVAALU_VV_VX_RM {
2791  foreach m = MxList in {
2792    defvar mx = m.MX;
2793    defm "" : VPseudoBinaryV_VV_RM<m>,
2794              SchedBinary<"WriteVAALUV", "ReadVAALUV", "ReadVAALUV", mx,
2795                          forceMergeOpRead=true>;
2796    defm "" : VPseudoBinaryV_VX_RM<m>,
2797              SchedBinary<"WriteVAALUX", "ReadVAALUV", "ReadVAALUX", mx,
2798                          forceMergeOpRead=true>;
2799  }
2800}
2801
2802multiclass VPseudoVMINMAX_VV_VX {
2803  foreach m = MxList in {
2804    defvar mx = m.MX;
2805    defm "" : VPseudoBinaryV_VV<m>,
2806              SchedBinary<"WriteVIMinMaxV", "ReadVIMinMaxV", "ReadVIMinMaxV", mx>;
2807    defm "" : VPseudoBinaryV_VX<m>,
2808              SchedBinary<"WriteVIMinMaxX", "ReadVIMinMaxV", "ReadVIMinMaxX", mx>;
2809  }
2810}
2811
2812multiclass VPseudoVMUL_VV_VX {
2813  foreach m = MxList in {
2814    defvar mx = m.MX;
2815    defm "" : VPseudoBinaryV_VV<m>,
2816              SchedBinary<"WriteVIMulV", "ReadVIMulV", "ReadVIMulV", mx>;
2817    defm "" : VPseudoBinaryV_VX<m>,
2818              SchedBinary<"WriteVIMulX", "ReadVIMulV", "ReadVIMulX", mx>;
2819  }
2820}
2821
2822multiclass VPseudoVDIV_VV_VX {
2823  foreach m = MxList in {
2824    defvar mx = m.MX;
2825    defvar sews = SchedSEWSet<mx>.val;
2826    foreach e = sews in {
2827      defm "" : VPseudoBinaryV_VV<m, "", e>,
2828                SchedBinary<"WriteVIDivV", "ReadVIDivV", "ReadVIDivV", mx, e>;
2829      defm "" : VPseudoBinaryV_VX<m, "", e>,
2830                SchedBinary<"WriteVIDivX", "ReadVIDivV", "ReadVIDivX", mx, e>;
2831    }
2832  }
2833}
2834
2835multiclass VPseudoVFMUL_VV_VF_RM {
2836  foreach m = MxListF in {
2837    defm "" : VPseudoBinaryFV_VV_RM<m>,
2838              SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX,
2839                          forceMergeOpRead=true>;
2840  }
2841
2842  foreach f = FPList in {
2843    foreach m = f.MxList in {
2844      defm "" : VPseudoBinaryV_VF_RM<m, f>,
2845                SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX,
2846                            forceMergeOpRead=true>;
2847    }
2848  }
2849}
2850
2851multiclass VPseudoVFDIV_VV_VF_RM {
2852  foreach m = MxListF in {
2853    defvar mx = m.MX;
2854    defvar sews = SchedSEWSet<mx, isF=1>.val;
2855    foreach e = sews in {
2856      defm "" : VPseudoBinaryFV_VV_RM<m, "", e>,
2857                SchedBinary<"WriteVFDivV", "ReadVFDivV", "ReadVFDivV", mx, e,
2858                            forceMergeOpRead=true>;
2859    }
2860  }
2861
2862  foreach f = FPList in {
2863    foreach m = f.MxList in {
2864      defm "" : VPseudoBinaryV_VF_RM<m, f, "", f.SEW>,
2865                SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
2866                            forceMergeOpRead=true>;
2867    }
2868  }
2869}
2870
2871multiclass VPseudoVFRDIV_VF_RM {
2872  foreach f = FPList in {
2873    foreach m = f.MxList in {
2874      defm "" : VPseudoBinaryV_VF_RM<m, f, "", f.SEW>,
2875                SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
2876                            forceMergeOpRead=true>;
2877    }
2878  }
2879}
2880
2881multiclass VPseudoVALU_VV_VX {
2882 foreach m = MxList in {
2883    defm "" : VPseudoBinaryV_VV<m>,
2884            SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
2885                        forceMergeOpRead=true>;
2886    defm "" : VPseudoBinaryV_VX<m>,
2887            SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
2888                        forceMergeOpRead=true>;
2889  }
2890}
2891
2892multiclass VPseudoVSGNJ_VV_VF {
2893  foreach m = MxListF in {
2894    defm "" : VPseudoBinaryFV_VV<m>,
2895              SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX,
2896                          forceMergeOpRead=true>;
2897  }
2898
2899  foreach f = FPList in {
2900    foreach m = f.MxList in {
2901      defm "" : VPseudoBinaryV_VF<m, f>,
2902                SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX,
2903                            forceMergeOpRead=true>;
2904    }
2905  }
2906}
2907
2908multiclass VPseudoVMAX_VV_VF {
2909  foreach m = MxListF in {
2910    defm "" : VPseudoBinaryFV_VV<m>,
2911              SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV", m.MX,
2912                          forceMergeOpRead=true>;
2913  }
2914
2915  foreach f = FPList in {
2916    foreach m = f.MxList in {
2917      defm "" : VPseudoBinaryV_VF<m, f>,
2918                SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF", m.MX,
2919                            forceMergeOpRead=true>;
2920    }
2921  }
2922}
2923
2924multiclass VPseudoVALU_VV_VF {
2925  foreach m = MxListF in {
2926    defm "" : VPseudoBinaryFV_VV<m>,
2927              SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX,
2928                          forceMergeOpRead=true>;
2929  }
2930
2931  foreach f = FPList in {
2932    foreach m = f.MxList in {
2933      defm "" : VPseudoBinaryV_VF<m, f>,
2934                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
2935                            forceMergeOpRead=true>;
2936    }
2937  }
2938}
2939
2940multiclass VPseudoVALU_VV_VF_RM {
2941  foreach m = MxListF in {
2942    defm "" : VPseudoBinaryFV_VV_RM<m>,
2943              SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX,
2944                          forceMergeOpRead=true>;
2945  }
2946
2947  foreach f = FPList in {
2948    foreach m = f.MxList in {
2949      defm "" : VPseudoBinaryV_VF_RM<m, f>,
2950                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
2951                            forceMergeOpRead=true>;
2952    }
2953  }
2954}
2955
2956multiclass VPseudoVALU_VF {
2957  foreach f = FPList in {
2958    foreach m = f.MxList in {
2959      defm "" : VPseudoBinaryV_VF<m, f>,
2960                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
2961                            forceMergeOpRead=true>;
2962    }
2963  }
2964}
2965
2966multiclass VPseudoVALU_VF_RM {
2967  foreach f = FPList in {
2968    foreach m = f.MxList in {
2969      defm "" : VPseudoBinaryV_VF_RM<m, f>,
2970                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
2971                            forceMergeOpRead=true>;
2972    }
2973  }
2974}
2975
2976multiclass VPseudoVALU_VX_VI<Operand ImmType = simm5> {
2977  foreach m = MxList in {
2978    defvar mx = m.MX;
2979    defm "" : VPseudoBinaryV_VX<m>,
2980              SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
2981                          forceMergeOpRead=true>;
2982    defm "" : VPseudoBinaryV_VI<ImmType, m>,
2983              SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
2984  }
2985}
2986
2987multiclass VPseudoVWALU_VV_VX {
2988  foreach m = MxListW in {
2989    defvar mx = m.MX;
2990    defm "" : VPseudoBinaryW_VV<m>,
2991              SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
2992                          forceMergeOpRead=true>;
2993    defm "" : VPseudoBinaryW_VX<m>,
2994              SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
2995                          forceMergeOpRead=true>;
2996  }
2997}
2998
2999multiclass VPseudoVWALU_VV_VX_VI<Operand ImmType> : VPseudoVWALU_VV_VX {
3000  foreach m = MxListW in {
3001    defm "" : VPseudoBinaryW_VI<ImmType, m>;
3002  }
3003}
3004
3005multiclass VPseudoVWMUL_VV_VX {
3006  foreach m = MxListW in {
3007    defvar mx = m.MX;
3008    defm "" : VPseudoBinaryW_VV<m>,
3009              SchedBinary<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV", mx,
3010                          forceMergeOpRead=true>;
3011    defm "" : VPseudoBinaryW_VX<m>,
3012              SchedBinary<"WriteVIWMulX", "ReadVIWMulV", "ReadVIWMulX", mx,
3013                          forceMergeOpRead=true>;
3014  }
3015}
3016
3017multiclass VPseudoVWMUL_VV_VF_RM {
3018  foreach m = MxListFW in {
3019    defm "" : VPseudoBinaryW_VV_RM<m>,
3020              SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX,
3021                          forceMergeOpRead=true>;
3022  }
3023
3024  foreach f = FPListW in {
3025    foreach m = f.MxListFW in {
3026      defm "" : VPseudoBinaryW_VF_RM<m, f>,
3027                SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX,
3028                          forceMergeOpRead=true>;
3029    }
3030  }
3031}
3032
3033multiclass VPseudoVWALU_WV_WX {
3034  foreach m = MxListW in {
3035    defvar mx = m.MX;
3036    defm "" : VPseudoBinaryW_WV<m>,
3037              SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
3038                          forceMergeOpRead=true>;
3039    defm "" : VPseudoBinaryW_WX<m>,
3040              SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
3041                          forceMergeOpRead=true>;
3042  }
3043}
3044
3045multiclass VPseudoVFWALU_VV_VF_RM {
3046  foreach m = MxListFW in {
3047    defm "" : VPseudoBinaryW_VV_RM<m>,
3048              SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
3049                          forceMergeOpRead=true>;
3050  }
3051
3052  foreach f = FPListW in {
3053    foreach m = f.MxListFW in {
3054      defm "" : VPseudoBinaryW_VF_RM<m, f>,
3055                SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
3056                          forceMergeOpRead=true>;
3057    }
3058  }
3059}
3060
3061multiclass VPseudoVFWALU_WV_WF_RM {
3062  foreach m = MxListFW in {
3063    defm "" : VPseudoBinaryW_WV_RM<m>,
3064              SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
3065                          forceMergeOpRead=true>;
3066  }
3067  foreach f = FPListW in {
3068    foreach m = f.MxListFW in {
3069      defm "" : VPseudoBinaryW_WF_RM<m, f>,
3070                SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
3071                          forceMergeOpRead=true>;
3072    }
3073  }
3074}
3075
3076multiclass VPseudoVMRG_VM_XM_IM {
3077  foreach m = MxList in {
3078    defvar mx = m.MX;
3079    def "_VVM" # "_" # m.MX:
3080      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
3081                               m.vrclass, m.vrclass, m, 1, "">,
3082      SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
3083                          forceMergeOpRead=true>;
3084    def "_VXM" # "_" # m.MX:
3085      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
3086                               m.vrclass, GPR, m, 1, "">,
3087      SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
3088                          forceMergeOpRead=true>;
3089    def "_VIM" # "_" # m.MX:
3090      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
3091                               m.vrclass, simm5, m, 1, "">,
3092      SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
3093                          forceMergeOpRead=true>;
3094  }
3095}
3096
3097multiclass VPseudoVCALU_VM_XM_IM {
3098  foreach m = MxList in {
3099    defvar mx = m.MX;
3100    defm "" : VPseudoTiedBinaryV_VM<m>,
3101              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
3102                          forceMergeOpRead=true>;
3103    defm "" : VPseudoTiedBinaryV_XM<m>,
3104              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
3105                          forceMergeOpRead=true>;
3106    defm "" : VPseudoTiedBinaryV_IM<m>,
3107              SchedUnary<"WriteVICALUI", "ReadVICALUV", mx,
3108                          forceMergeOpRead=true>;
3109  }
3110}
3111
3112multiclass VPseudoVCALU_VM_XM {
3113  foreach m = MxList in {
3114    defvar mx = m.MX;
3115    defm "" : VPseudoTiedBinaryV_VM<m>,
3116              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
3117                          forceMergeOpRead=true>;
3118    defm "" : VPseudoTiedBinaryV_XM<m>,
3119              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
3120                          forceMergeOpRead=true>;
3121  }
3122}
3123
3124multiclass VPseudoVCALUM_VM_XM_IM<string Constraint> {
3125  foreach m = MxList in {
3126    defvar mx = m.MX;
3127    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=Constraint,
3128                                Commutable=1, TargetConstraintType=2>,
3129              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
3130                          forceMergeOpRead=true>;
3131    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=Constraint, TargetConstraintType=2>,
3132              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
3133                          forceMergeOpRead=true>;
3134    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=1, Constraint=Constraint, TargetConstraintType=2>,
3135              SchedUnary<"WriteVICALUI", "ReadVICALUV", mx, forceMasked=1,
3136                          forceMergeOpRead=true>;
3137  }
3138}
3139
3140multiclass VPseudoVCALUM_VM_XM<string Constraint> {
3141  foreach m = MxList in {
3142    defvar mx = m.MX;
3143    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=Constraint, TargetConstraintType=2>,
3144              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
3145                          forceMergeOpRead=true>;
3146    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=Constraint, TargetConstraintType=2>,
3147              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
3148                          forceMergeOpRead=true>;
3149  }
3150}
3151
3152multiclass VPseudoVCALUM_V_X_I<string Constraint> {
3153  foreach m = MxList in {
3154    defvar mx = m.MX;
3155    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=Constraint,
3156                                Commutable=1, TargetConstraintType=2>,
3157              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
3158                          forceMergeOpRead=true>;
3159    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=Constraint, TargetConstraintType=2>,
3160              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
3161                          forceMergeOpRead=true>;
3162    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=0, Constraint=Constraint>,
3163              SchedUnary<"WriteVICALUI", "ReadVICALUV", mx,
3164                          forceMergeOpRead=true>;
3165  }
3166}
3167
3168multiclass VPseudoVCALUM_V_X<string Constraint> {
3169  foreach m = MxList in {
3170    defvar mx = m.MX;
3171    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=Constraint, TargetConstraintType=2>,
3172              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
3173                          forceMergeOpRead=true>;
3174    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=Constraint, TargetConstraintType=2>,
3175              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
3176                          forceMergeOpRead=true>;
3177  }
3178}
3179
3180multiclass VPseudoVNCLP_WV_WX_WI_RM {
3181  foreach m = MxListW in {
3182    defvar mx = m.MX;
3183    defm "" : VPseudoBinaryV_WV_RM<m>,
3184              SchedBinary<"WriteVNClipV", "ReadVNClipV", "ReadVNClipV", mx,
3185                          forceMergeOpRead=true>;
3186    defm "" : VPseudoBinaryV_WX_RM<m>,
3187              SchedBinary<"WriteVNClipX", "ReadVNClipV", "ReadVNClipX", mx,
3188                          forceMergeOpRead=true>;
3189    defm "" : VPseudoBinaryV_WI_RM<m>,
3190              SchedUnary<"WriteVNClipI", "ReadVNClipV", mx,
3191                          forceMergeOpRead=true>;
3192  }
3193}
3194
3195multiclass VPseudoVNSHT_WV_WX_WI {
3196  foreach m = MxListW in {
3197    defvar mx = m.MX;
3198    defm "" : VPseudoBinaryV_WV<m, TargetConstraintType=2>,
3199              SchedBinary<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV", mx,
3200                          forceMergeOpRead=true>;
3201    defm "" : VPseudoBinaryV_WX<m, TargetConstraintType=2>,
3202              SchedBinary<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX", mx,
3203                          forceMergeOpRead=true>;
3204    defm "" : VPseudoBinaryV_WI<m, TargetConstraintType=2>,
3205              SchedUnary<"WriteVNShiftI", "ReadVNShiftV", mx,
3206                          forceMergeOpRead=true>;
3207  }
3208}
3209
3210multiclass VPseudoTernaryWithTailPolicy<VReg RetClass,
3211                                          RegisterClass Op1Class,
3212                                          DAGOperand Op2Class,
3213                                          LMULInfo MInfo,
3214                                          int sew,
3215                                          string Constraint = "",
3216                                          bit Commutable = 0> {
3217  let VLMul = MInfo.value, SEW=sew in {
3218    defvar mx = MInfo.MX;
3219    let isCommutable = Commutable in
3220    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
3221    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
3222                                          RISCVMaskedPseudo<MaskIdx=3, MaskAffectsRes=true>;
3223  }
3224}
3225
3226multiclass VPseudoTernaryWithTailPolicyRoundingMode<VReg RetClass,
3227                                          RegisterClass Op1Class,
3228                                          DAGOperand Op2Class,
3229                                          LMULInfo MInfo,
3230                                          int sew,
3231                                          string Constraint = "",
3232                                          bit Commutable = 0> {
3233  let VLMul = MInfo.value, SEW=sew in {
3234    defvar mx = MInfo.MX;
3235    let isCommutable = Commutable in
3236    def "_" # mx # "_E" # sew
3237        : VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
3238                                                     Op2Class, Constraint>;
3239    def "_" # mx # "_E" # sew # "_MASK"
3240        : VPseudoTernaryMaskPolicyRoundingMode<RetClass, Op1Class,
3241                                               Op2Class, Constraint>,
3242          RISCVMaskedPseudo<MaskIdx=3, MaskAffectsRes=true>;
3243  }
3244}
3245
3246multiclass VPseudoTernaryWithPolicy<VReg RetClass,
3247                                    RegisterClass Op1Class,
3248                                    DAGOperand Op2Class,
3249                                    LMULInfo MInfo,
3250                                    string Constraint = "",
3251                                    bit Commutable = 0,
3252                                    int TargetConstraintType = 1> {
3253  let VLMul = MInfo.value in {
3254    let isCommutable = Commutable in
3255    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint, TargetConstraintType>;
3256    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint, TargetConstraintType>,
3257                                   RISCVMaskedPseudo<MaskIdx=3>;
3258  }
3259}
3260
3261multiclass VPseudoTernaryWithPolicyRoundingMode<VReg RetClass,
3262                                                RegisterClass Op1Class,
3263                                                DAGOperand Op2Class,
3264                                                LMULInfo MInfo,
3265                                                string Constraint = "",
3266                                                bit Commutable = 0,
3267                                                int TargetConstraintType = 1> {
3268  let VLMul = MInfo.value in {
3269    let isCommutable = Commutable in
3270    def "_" # MInfo.MX :
3271        VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
3272                                                   Op2Class, Constraint,
3273                                                   TargetConstraintType>;
3274    def "_" # MInfo.MX # "_MASK" :
3275        VPseudoBinaryMaskPolicyRoundingMode<RetClass, Op1Class,
3276                                            Op2Class, Constraint,
3277                                            UsesVXRM_=0,
3278                                            TargetConstraintType=TargetConstraintType>,
3279                                   RISCVMaskedPseudo<MaskIdx=3>;
3280  }
3281}
3282
3283multiclass VPseudoTernaryV_VV_AAXA<LMULInfo m, string Constraint = ""> {
3284  defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
3285                                      Constraint, Commutable=1>;
3286}
3287
3288multiclass VPseudoTernaryV_VV_AAXA_RM<LMULInfo m, string Constraint = ""> {
3289  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
3290                                                  Constraint, Commutable=1>;
3291}
3292
3293multiclass VPseudoTernaryV_VX_AAXA<LMULInfo m, string Constraint = ""> {
3294  defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
3295                                        Constraint, Commutable=1>;
3296}
3297
3298multiclass VPseudoTernaryV_VF_AAXA<LMULInfo m, FPR_Info f, string Constraint = ""> {
3299  defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
3300                                              m.vrclass, m, Constraint,
3301                                              Commutable=1>;
3302}
3303
3304multiclass VPseudoTernaryV_VF_AAXA_RM<LMULInfo m, FPR_Info f, string Constraint = ""> {
3305  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, f.fprclass,
3306                                                          m.vrclass, m, Constraint,
3307                                                          Commutable=1>;
3308}
3309
3310multiclass VPseudoTernaryW_VV<LMULInfo m> {
3311  defvar constraint = "@earlyclobber $rd";
3312  defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
3313                                      constraint, /*Commutable*/ 0, TargetConstraintType=3>;
3314}
3315
3316multiclass VPseudoTernaryW_VV_RM<LMULInfo m> {
3317  defvar constraint = "@earlyclobber $rd";
3318  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
3319                                                  constraint, /* Commutable */ 0,
3320                                                  TargetConstraintType=3>;
3321}
3322
3323multiclass VPseudoTernaryW_VX<LMULInfo m> {
3324  defvar constraint = "@earlyclobber $rd";
3325  defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
3326                                        constraint, /*Commutable*/ 0, TargetConstraintType=3>;
3327}
3328
3329multiclass VPseudoTernaryW_VF<LMULInfo m, FPR_Info f, int TargetConstraintType = 1> {
3330  defvar constraint = "@earlyclobber $rd";
3331  defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
3332                                              m.vrclass, m, constraint, /*Commutable*/ 0, TargetConstraintType>;
3333}
3334
3335multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f> {
3336  defvar constraint = "@earlyclobber $rd";
3337  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, f.fprclass,
3338                                                          m.vrclass, m, constraint,
3339                                                          /* Commutable */ 0,
3340                                                          TargetConstraintType=3>;
3341}
3342
3343multiclass VPseudoVSLDVWithPolicy<VReg RetClass,
3344                                  RegisterClass Op1Class,
3345                                  DAGOperand Op2Class,
3346                                  LMULInfo MInfo,
3347                                  string Constraint = ""> {
3348  let VLMul = MInfo.value in {
3349    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
3350    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
3351                                   RISCVMaskedPseudo<MaskIdx=3>;
3352  }
3353}
3354
3355multiclass VPseudoVSLDV_VX<LMULInfo m, string Constraint = ""> {
3356  defm _VX : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
3357}
3358
3359multiclass VPseudoVSLDV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
3360  defm _VI : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
3361}
3362
3363multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
3364  foreach m = MxList in {
3365    defvar mx = m.MX;
3366    defm "" : VPseudoTernaryV_VV_AAXA<m, Constraint>,
3367              SchedTernary<"WriteVIMulAddV", "ReadVIMulAddV", "ReadVIMulAddV",
3368                           "ReadVIMulAddV", mx>;
3369    defm "" : VPseudoTernaryV_VX_AAXA<m, Constraint>,
3370              SchedTernary<"WriteVIMulAddX", "ReadVIMulAddV", "ReadVIMulAddX",
3371                           "ReadVIMulAddV", mx>;
3372  }
3373}
3374
3375multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
3376  foreach m = MxListF in {
3377    defm "" : VPseudoTernaryV_VV_AAXA<m, Constraint>,
3378              SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
3379                           "ReadVFMulAddV", m.MX>;
3380  }
3381
3382  foreach f = FPList in {
3383    foreach m = f.MxList in {
3384      defm "" : VPseudoTernaryV_VF_AAXA<m, f, Constraint>,
3385                SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
3386                             "ReadVFMulAddV", m.MX>;
3387    }
3388  }
3389}
3390
3391multiclass VPseudoVMAC_VV_VF_AAXA_RM<string Constraint = ""> {
3392  foreach m = MxListF in {
3393    defm "" : VPseudoTernaryV_VV_AAXA_RM<m, Constraint>,
3394              SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
3395                           "ReadVFMulAddV", m.MX>;
3396  }
3397
3398  foreach f = FPList in {
3399    foreach m = f.MxList in {
3400      defm "" : VPseudoTernaryV_VF_AAXA_RM<m, f, Constraint>,
3401                SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
3402                             "ReadVFMulAddV", m.MX>;
3403    }
3404  }
3405}
3406
3407multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
3408  foreach m = MxList in {
3409    defvar mx = m.MX;
3410    defm "" : VPseudoVSLDV_VX<m, Constraint>,
3411              SchedTernary<"WriteVISlideX", "ReadVISlideV", "ReadVISlideV",
3412                           "ReadVISlideX", mx>;
3413    defm "" : VPseudoVSLDV_VI<ImmType, m, Constraint>,
3414              SchedBinary<"WriteVISlideI", "ReadVISlideV", "ReadVISlideV", mx>;
3415  }
3416}
3417
3418multiclass VPseudoVWMAC_VV_VX {
3419  foreach m = MxListW in {
3420    defvar mx = m.MX;
3421    defm "" : VPseudoTernaryW_VV<m>,
3422              SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV",
3423                           "ReadVIWMulAddV", mx>;
3424    defm "" : VPseudoTernaryW_VX<m>,
3425              SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX",
3426                           "ReadVIWMulAddV", mx>;
3427  }
3428}
3429
3430multiclass VPseudoVWMAC_VX {
3431  foreach m = MxListW in {
3432    defm "" : VPseudoTernaryW_VX<m>,
3433              SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX",
3434                           "ReadVIWMulAddV", m.MX>;
3435  }
3436}
3437
3438multiclass VPseudoVWMAC_VV_VF_RM {
3439  foreach m = MxListFW in {
3440    defm "" : VPseudoTernaryW_VV_RM<m>,
3441              SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
3442                           "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX>;
3443  }
3444
3445  foreach f = FPListW in {
3446    foreach m = f.MxListFW in {
3447      defm "" : VPseudoTernaryW_VF_RM<m, f>,
3448                SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
3449                             "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX>;
3450    }
3451  }
3452}
3453
3454multiclass VPseudoVWMAC_VV_VF_BF_RM {
3455  foreach m = MxListFW in {
3456    defvar mx = m.MX;
3457    defm "" : VPseudoTernaryW_VV_RM<m>,
3458              SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
3459                           "ReadVFWMulAddV", "ReadVFWMulAddV", mx>;
3460  }
3461
3462  foreach f = BFPListW in {
3463    foreach m = f.MxListFW in {
3464      defvar mx = m.MX;
3465      defm "" : VPseudoTernaryW_VF_RM<m, f>,
3466                SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
3467                             "ReadVFWMulAddF", "ReadVFWMulAddV", mx>;
3468    }
3469  }
3470}
3471
3472multiclass VPseudoVCMPM_VV_VX_VI {
3473  foreach m = MxList in {
3474    defvar mx = m.MX;
3475    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
3476              SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>;
3477    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
3478              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
3479    defm "" : VPseudoBinaryM_VI<m, TargetConstraintType=2>,
3480              SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>;
3481  }
3482}
3483
3484multiclass VPseudoVCMPM_VV_VX {
3485  foreach m = MxList in {
3486    defvar mx = m.MX;
3487    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
3488              SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>;
3489    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
3490              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
3491  }
3492}
3493
3494multiclass VPseudoVCMPM_VV_VF {
3495  foreach m = MxListF in {
3496    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
3497              SchedBinary<"WriteVFCmpV", "ReadVFCmpV", "ReadVFCmpV", m.MX>;
3498  }
3499
3500  foreach f = FPList in {
3501    foreach m = f.MxList in {
3502      defm "" : VPseudoBinaryM_VF<m, f, TargetConstraintType=2>,
3503                SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>;
3504    }
3505  }
3506}
3507
3508multiclass VPseudoVCMPM_VF {
3509  foreach f = FPList in {
3510    foreach m = f.MxList in {
3511      defm "" : VPseudoBinaryM_VF<m, f, TargetConstraintType=2>,
3512                SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>;
3513    }
3514  }
3515}
3516
3517multiclass VPseudoVCMPM_VX_VI {
3518  foreach m = MxList in {
3519    defvar mx = m.MX;
3520    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
3521              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
3522    defm "" : VPseudoBinaryM_VI<m, TargetConstraintType=2>,
3523              SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>;
3524  }
3525}
3526
3527multiclass VPseudoVRED_VS {
3528  foreach m = MxList in {
3529    defvar mx = m.MX;
3530    foreach e = SchedSEWSet<mx>.val in {
3531      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3532                 SchedReduction<"WriteVIRedV_From", "ReadVIRedV", mx, e>;
3533    }
3534  }
3535}
3536
3537multiclass VPseudoVREDMINMAX_VS {
3538  foreach m = MxList in {
3539    defvar mx = m.MX;
3540    foreach e = SchedSEWSet<mx>.val in {
3541      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3542                 SchedReduction<"WriteVIRedMinMaxV_From", "ReadVIRedV", mx, e>;
3543    }
3544  }
3545}
3546
3547multiclass VPseudoVWRED_VS {
3548  foreach m = MxListWRed in {
3549    defvar mx = m.MX;
3550    foreach e = SchedSEWSet<mx, isWidening=1>.val in {
3551      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3552                 SchedReduction<"WriteVIWRedV_From", "ReadVIWRedV", mx, e>;
3553    }
3554  }
3555}
3556
3557multiclass VPseudoVFRED_VS_RM {
3558  foreach m = MxListF in {
3559    defvar mx = m.MX;
3560    foreach e = SchedSEWSet<mx, isF=1>.val in {
3561      defm _VS
3562          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3563                                                     V_M1.vrclass, m, e>,
3564            SchedReduction<"WriteVFRedV_From", "ReadVFRedV", mx, e>;
3565    }
3566  }
3567}
3568
3569multiclass VPseudoVFREDMINMAX_VS {
3570  foreach m = MxListF in {
3571    defvar mx = m.MX;
3572    foreach e = SchedSEWSet<mx, isF=1>.val in {
3573      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3574                 SchedReduction<"WriteVFRedMinMaxV_From", "ReadVFRedV", mx, e>;
3575    }
3576  }
3577}
3578
3579multiclass VPseudoVFREDO_VS_RM {
3580  foreach m = MxListF in {
3581    defvar mx = m.MX;
3582    foreach e = SchedSEWSet<mx, isF=1>.val in {
3583      defm _VS : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3584                                                          V_M1.vrclass, m, e>,
3585                 SchedReduction<"WriteVFRedOV_From", "ReadVFRedOV", mx, e>;
3586    }
3587  }
3588}
3589
3590multiclass VPseudoVFWRED_VS_RM {
3591  foreach m = MxListFWRed in {
3592    defvar mx = m.MX;
3593    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
3594      defm _VS
3595          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3596                                                     V_M1.vrclass, m, e>,
3597            SchedReduction<"WriteVFWRedV_From", "ReadVFWRedV", mx, e>;
3598    }
3599  }
3600}
3601
3602multiclass VPseudoVFWREDO_VS_RM {
3603  foreach m = MxListFWRed in {
3604    defvar mx = m.MX;
3605    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
3606      defm _VS
3607          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3608                                                     V_M1.vrclass, m, e>,
3609            SchedReduction<"WriteVFWRedOV_From", "ReadVFWRedV", mx, e>;
3610    }
3611  }
3612}
3613
3614multiclass VPseudoConversion<VReg RetClass,
3615                             VReg Op1Class,
3616                             LMULInfo MInfo,
3617                             string Constraint = "",
3618                             int TargetConstraintType = 1> {
3619  let VLMul = MInfo.value in {
3620    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint, TargetConstraintType>;
3621    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
3622                                                    Constraint, TargetConstraintType>,
3623                                   RISCVMaskedPseudo<MaskIdx=2>;
3624  }
3625}
3626
3627multiclass VPseudoConversionRoundingMode<VReg RetClass,
3628                             VReg Op1Class,
3629                             LMULInfo MInfo,
3630                             string Constraint = "",
3631                             int TargetConstraintType = 1> {
3632  let VLMul = MInfo.value in {
3633    def "_" # MInfo.MX : VPseudoUnaryNoMaskRoundingMode<RetClass, Op1Class, Constraint, TargetConstraintType>;
3634    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskRoundingMode<RetClass, Op1Class,
3635                                                                Constraint, TargetConstraintType>,
3636                                   RISCVMaskedPseudo<MaskIdx=2>;
3637  }
3638}
3639
3640
3641multiclass VPseudoConversionRM<VReg RetClass,
3642                               VReg Op1Class,
3643                               LMULInfo MInfo,
3644                               string Constraint = "",
3645                               int TargetConstraintType = 1> {
3646  let VLMul = MInfo.value in {
3647    def "_" # MInfo.MX : VPseudoUnaryNoMask_FRM<RetClass, Op1Class,
3648                                                        Constraint, TargetConstraintType>;
3649    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask_FRM<RetClass, Op1Class,
3650                                                        Constraint, TargetConstraintType>,
3651                                   RISCVMaskedPseudo<MaskIdx=2>;
3652  }
3653}
3654
3655multiclass VPseudoConversionNoExcept<VReg RetClass,
3656                                     VReg Op1Class,
3657                                     LMULInfo MInfo,
3658                                     string Constraint = ""> {
3659  let VLMul = MInfo.value in {
3660    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask_NoExcept<RetClass, Op1Class, Constraint>;
3661  }
3662}
3663
3664multiclass VPseudoVCVTI_V {
3665  foreach m = MxListF in {
3666    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
3667              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
3668                         forceMergeOpRead=true>;
3669  }
3670}
3671
3672multiclass VPseudoVCVTI_V_RM {
3673  foreach m = MxListF in {
3674    defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m>,
3675              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
3676                         forceMergeOpRead=true>;
3677  }
3678}
3679
3680multiclass VPseudoVCVTI_RM_V {
3681  foreach m = MxListF in {
3682    defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
3683              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
3684                         forceMergeOpRead=true>;
3685  }
3686}
3687
3688multiclass VPseudoVFROUND_NOEXCEPT_V {
3689  foreach m = MxListF in {
3690    defm _V : VPseudoConversionNoExcept<m.vrclass, m.vrclass, m>,
3691              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
3692                         forceMergeOpRead=true>;
3693  }
3694}
3695
3696multiclass VPseudoVCVTF_V_RM {
3697  foreach m = MxListF in {
3698    defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m>,
3699              SchedUnary<"WriteVFCvtIToFV", "ReadVFCvtIToFV", m.MX,
3700                         forceMergeOpRead=true>;
3701  }
3702}
3703
3704multiclass VPseudoVCVTF_RM_V {
3705  foreach m = MxListF in {
3706    defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
3707              SchedUnary<"WriteVFCvtIToFV", "ReadVFCvtIToFV", m.MX,
3708                         forceMergeOpRead=true>;
3709  }
3710}
3711
3712multiclass VPseudoVWCVTI_V {
3713  defvar constraint = "@earlyclobber $rd";
3714  foreach m = MxListFW in {
3715    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
3716              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
3717                         forceMergeOpRead=true>;
3718  }
3719}
3720
3721multiclass VPseudoVWCVTI_V_RM {
3722  defvar constraint = "@earlyclobber $rd";
3723  foreach m = MxListFW in {
3724    defm _V : VPseudoConversionRoundingMode<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
3725              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
3726                         forceMergeOpRead=true>;
3727  }
3728}
3729
3730multiclass VPseudoVWCVTI_RM_V {
3731  defvar constraint = "@earlyclobber $rd";
3732  foreach m = MxListFW in {
3733    defm _V : VPseudoConversionRM<m.wvrclass, m.vrclass, m, constraint>,
3734              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
3735                         forceMergeOpRead=true>;
3736  }
3737}
3738
3739multiclass VPseudoVWCVTF_V {
3740  defvar constraint = "@earlyclobber $rd";
3741  foreach m = MxListW in {
3742    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
3743              SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX,
3744                         forceMergeOpRead=true>;
3745  }
3746}
3747
3748multiclass VPseudoVWCVTD_V {
3749  defvar constraint = "@earlyclobber $rd";
3750  foreach m = MxListFW in {
3751    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
3752              SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX,
3753                         forceMergeOpRead=true>;
3754  }
3755}
3756
3757multiclass VPseudoVNCVTI_W {
3758  defvar constraint = "@earlyclobber $rd";
3759  foreach m = MxListW in {
3760    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3761              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
3762                         forceMergeOpRead=true>;
3763  }
3764}
3765
3766multiclass VPseudoVNCVTI_W_RM {
3767  defvar constraint = "@earlyclobber $rd";
3768  foreach m = MxListW in {
3769    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3770              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
3771                         forceMergeOpRead=true>;
3772  }
3773}
3774
3775multiclass VPseudoVNCVTI_RM_W {
3776  defvar constraint = "@earlyclobber $rd";
3777  foreach m = MxListW in {
3778    defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3779              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
3780                         forceMergeOpRead=true>;
3781  }
3782}
3783
3784multiclass VPseudoVNCVTF_W_RM {
3785  defvar constraint = "@earlyclobber $rd";
3786  foreach m = MxListFW in {
3787    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3788              SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX,
3789                         forceMergeOpRead=true>;
3790  }
3791}
3792
3793multiclass VPseudoVNCVTF_RM_W {
3794  defvar constraint = "@earlyclobber $rd";
3795  foreach m = MxListFW in {
3796    defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint>,
3797              SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX,
3798                         forceMergeOpRead=true>;
3799  }
3800}
3801
3802multiclass VPseudoVNCVTD_W {
3803  defvar constraint = "@earlyclobber $rd";
3804  foreach m = MxListFW in {
3805    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3806              SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX,
3807                         forceMergeOpRead=true>;
3808  }
3809}
3810
3811multiclass VPseudoVNCVTD_W_RM {
3812  defvar constraint = "@earlyclobber $rd";
3813  foreach m = MxListFW in {
3814    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3815              SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX,
3816                         forceMergeOpRead=true>;
3817  }
3818}
3819
3820multiclass VPseudoUSSegLoad {
3821  foreach eew = EEWList in {
3822    foreach lmul = MxSet<eew>.m in {
3823      defvar LInfo = lmul.MX;
3824      let VLMul = lmul.value, SEW=eew in {
3825        foreach nf = NFSet<lmul>.L in {
3826          defvar vreg = SegRegClass<lmul, nf>.RC;
3827          def nf # "E" # eew # "_V_" # LInfo :
3828            VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
3829          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
3830            VPseudoUSSegLoadMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
3831        }
3832      }
3833    }
3834  }
3835}
3836
3837multiclass VPseudoUSSegLoadFF {
3838  foreach eew = EEWList in {
3839    foreach lmul = MxSet<eew>.m in {
3840      defvar LInfo = lmul.MX;
3841      let VLMul = lmul.value, SEW=eew in {
3842        foreach nf = NFSet<lmul>.L in {
3843          defvar vreg = SegRegClass<lmul, nf>.RC;
3844          def nf # "E" # eew # "FF_V_" # LInfo :
3845            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
3846          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
3847            VPseudoUSSegLoadFFMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
3848        }
3849      }
3850    }
3851  }
3852}
3853
3854multiclass VPseudoSSegLoad {
3855  foreach eew = EEWList in {
3856    foreach lmul = MxSet<eew>.m in {
3857      defvar LInfo = lmul.MX;
3858      let VLMul = lmul.value, SEW=eew in {
3859        foreach nf = NFSet<lmul>.L in {
3860          defvar vreg = SegRegClass<lmul, nf>.RC;
3861          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>,
3862                                               VLSSEGSched<nf, eew, LInfo>;
3863          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>,
3864                                                         VLSSEGSched<nf, eew, LInfo>;
3865        }
3866      }
3867    }
3868  }
3869}
3870
3871multiclass VPseudoISegLoad<bit Ordered> {
3872  foreach idxEEW = EEWList in {
3873    foreach dataEEW = EEWList in {
3874      foreach dataEMUL = MxSet<dataEEW>.m in {
3875        defvar dataEMULOctuple = dataEMUL.octuple;
3876        // Calculate emul = eew * lmul / sew
3877        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
3878        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
3879          defvar DataLInfo = dataEMUL.MX;
3880          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
3881          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
3882          defvar DataVreg = dataEMUL.vrclass;
3883          defvar IdxVreg = idxEMUL.vrclass;
3884          let VLMul = dataEMUL.value in {
3885            foreach nf = NFSet<dataEMUL>.L in {
3886              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
3887              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
3888                VPseudoISegLoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3889                                      nf, Ordered>,
3890                VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
3891              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
3892                VPseudoISegLoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3893                                    nf, Ordered>,
3894                VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
3895            }
3896          }
3897        }
3898      }
3899    }
3900  }
3901}
3902
3903multiclass VPseudoUSSegStore {
3904  foreach eew = EEWList in {
3905    foreach lmul = MxSet<eew>.m in {
3906      defvar LInfo = lmul.MX;
3907      let VLMul = lmul.value, SEW=eew in {
3908        foreach nf = NFSet<lmul>.L in {
3909          defvar vreg = SegRegClass<lmul, nf>.RC;
3910          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>,
3911                                               VSSEGSched<nf, eew, LInfo>;
3912          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>,
3913                                                         VSSEGSched<nf, eew, LInfo>;
3914        }
3915      }
3916    }
3917  }
3918}
3919
3920multiclass VPseudoSSegStore {
3921  foreach eew = EEWList in {
3922    foreach lmul = MxSet<eew>.m in {
3923      defvar LInfo = lmul.MX;
3924      let VLMul = lmul.value, SEW=eew in {
3925        foreach nf = NFSet<lmul>.L in {
3926          defvar vreg = SegRegClass<lmul, nf>.RC;
3927          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>,
3928                                               VSSSEGSched<nf, eew, LInfo>;
3929          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>,
3930                                                         VSSSEGSched<nf, eew, LInfo>;
3931        }
3932      }
3933    }
3934  }
3935}
3936
3937multiclass VPseudoISegStore<bit Ordered> {
3938  foreach idxEEW = EEWList in {
3939    foreach dataEEW = EEWList in {
3940      foreach dataEMUL = MxSet<dataEEW>.m in {
3941        defvar dataEMULOctuple = dataEMUL.octuple;
3942        // Calculate emul = eew * lmul / sew
3943        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
3944        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
3945          defvar DataLInfo = dataEMUL.MX;
3946          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
3947          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
3948          defvar DataVreg = dataEMUL.vrclass;
3949          defvar IdxVreg = idxEMUL.vrclass;
3950          let VLMul = dataEMUL.value in {
3951            foreach nf = NFSet<dataEMUL>.L in {
3952              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
3953              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
3954                VPseudoISegStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3955                                       nf, Ordered>,
3956                VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
3957              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
3958                VPseudoISegStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3959                                     nf, Ordered>,
3960                VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
3961            }
3962          }
3963        }
3964      }
3965    }
3966  }
3967}
3968
3969//===----------------------------------------------------------------------===//
3970// Helpers to define the intrinsic patterns.
3971//===----------------------------------------------------------------------===//
3972
3973class VPatUnaryNoMask<string intrinsic_name,
3974                      string inst,
3975                      string kind,
3976                      ValueType result_type,
3977                      ValueType op2_type,
3978                      int log2sew,
3979                      LMULInfo vlmul,
3980                      VReg result_reg_class,
3981                      VReg op2_reg_class> :
3982  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
3983                   (result_type result_reg_class:$merge),
3984                   (op2_type op2_reg_class:$rs2),
3985                   VLOpFrag)),
3986                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
3987                   (result_type result_reg_class:$merge),
3988                   (op2_type op2_reg_class:$rs2),
3989                   GPR:$vl, log2sew, TU_MU)>;
3990
3991class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
3992                                  string inst,
3993                                  string kind,
3994                                  ValueType result_type,
3995                                  ValueType op2_type,
3996                                  int log2sew,
3997                                  LMULInfo vlmul,
3998                                  VReg result_reg_class,
3999                                  VReg op2_reg_class,
4000                                  bit isSEWAware = 0> :
4001  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4002                   (result_type result_reg_class:$merge),
4003                   (op2_type op2_reg_class:$rs2),
4004                   (XLenVT timm:$round),
4005                   VLOpFrag)),
4006                   (!cast<Instruction>(
4007                      !if(isSEWAware,
4008                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
4009                          inst#"_"#kind#"_"#vlmul.MX))
4010                   (result_type result_reg_class:$merge),
4011                   (op2_type op2_reg_class:$rs2),
4012                   (XLenVT timm:$round),
4013                   GPR:$vl, log2sew, TU_MU)>;
4014
4015
4016class VPatUnaryMask<string intrinsic_name,
4017                    string inst,
4018                    string kind,
4019                    ValueType result_type,
4020                    ValueType op2_type,
4021                    ValueType mask_type,
4022                    int log2sew,
4023                    LMULInfo vlmul,
4024                    VReg result_reg_class,
4025                    VReg op2_reg_class> :
4026  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4027                   (result_type result_reg_class:$merge),
4028                   (op2_type op2_reg_class:$rs2),
4029                   (mask_type V0),
4030                   VLOpFrag, (XLenVT timm:$policy))),
4031                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
4032                   (result_type result_reg_class:$merge),
4033                   (op2_type op2_reg_class:$rs2),
4034                   (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4035
4036class VPatUnaryMaskRoundingMode<string intrinsic_name,
4037                                string inst,
4038                                string kind,
4039                                ValueType result_type,
4040                                ValueType op2_type,
4041                                ValueType mask_type,
4042                                int log2sew,
4043                                LMULInfo vlmul,
4044                                VReg result_reg_class,
4045                                VReg op2_reg_class,
4046                                bit isSEWAware = 0> :
4047  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4048                   (result_type result_reg_class:$merge),
4049                   (op2_type op2_reg_class:$rs2),
4050                   (mask_type V0),
4051                   (XLenVT timm:$round),
4052                   VLOpFrag, (XLenVT timm:$policy))),
4053                   (!cast<Instruction>(
4054                      !if(isSEWAware,
4055                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
4056                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
4057                   (result_type result_reg_class:$merge),
4058                   (op2_type op2_reg_class:$rs2),
4059                   (mask_type V0),
4060                   (XLenVT timm:$round),
4061                   GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4062
4063
4064class VPatMaskUnaryNoMask<string intrinsic_name,
4065                          string inst,
4066                          MTypeInfo mti> :
4067  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
4068                (mti.Mask VR:$rs2),
4069                VLOpFrag)),
4070                (!cast<Instruction>(inst#"_M_"#mti.BX)
4071                (mti.Mask (IMPLICIT_DEF)),
4072                (mti.Mask VR:$rs2),
4073                GPR:$vl, mti.Log2SEW, TA_MA)>;
4074
4075class VPatMaskUnaryMask<string intrinsic_name,
4076                        string inst,
4077                        MTypeInfo mti> :
4078  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
4079                (mti.Mask VR:$merge),
4080                (mti.Mask VR:$rs2),
4081                (mti.Mask V0),
4082                VLOpFrag)),
4083                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
4084                (mti.Mask VR:$merge),
4085                (mti.Mask VR:$rs2),
4086                (mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
4087
4088class VPatUnaryAnyMask<string intrinsic,
4089                       string inst,
4090                       string kind,
4091                       ValueType result_type,
4092                       ValueType op1_type,
4093                       ValueType mask_type,
4094                       int log2sew,
4095                       LMULInfo vlmul,
4096                       VReg result_reg_class,
4097                       VReg op1_reg_class> :
4098  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4099                   (result_type result_reg_class:$merge),
4100                   (op1_type op1_reg_class:$rs1),
4101                   (mask_type VR:$rs2),
4102                   VLOpFrag)),
4103                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
4104                   (result_type result_reg_class:$merge),
4105                   (op1_type op1_reg_class:$rs1),
4106                   (mask_type VR:$rs2),
4107                   GPR:$vl, log2sew)>;
4108
4109class VPatBinaryM<string intrinsic_name,
4110                  string inst,
4111                  ValueType result_type,
4112                  ValueType op1_type,
4113                  ValueType op2_type,
4114                  int sew,
4115                  VReg op1_reg_class,
4116                  DAGOperand op2_kind> :
4117  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4118                   (op1_type op1_reg_class:$rs1),
4119                   (op2_type op2_kind:$rs2),
4120                   VLOpFrag)),
4121                   (!cast<Instruction>(inst)
4122                   (op1_type op1_reg_class:$rs1),
4123                   (op2_type op2_kind:$rs2),
4124                   GPR:$vl, sew)>;
4125
4126class VPatBinaryNoMaskTU<string intrinsic_name,
4127                         string inst,
4128                         ValueType result_type,
4129                         ValueType op1_type,
4130                         ValueType op2_type,
4131                         int sew,
4132                         VReg result_reg_class,
4133                         VReg op1_reg_class,
4134                         DAGOperand op2_kind> :
4135  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4136                   (result_type result_reg_class:$merge),
4137                   (op1_type op1_reg_class:$rs1),
4138                   (op2_type op2_kind:$rs2),
4139                   VLOpFrag)),
4140                   (!cast<Instruction>(inst)
4141                   (result_type result_reg_class:$merge),
4142                   (op1_type op1_reg_class:$rs1),
4143                   (op2_type op2_kind:$rs2),
4144                   GPR:$vl, sew, TU_MU)>;
4145
4146class VPatBinaryNoMaskRoundingMode<string intrinsic_name,
4147                                   string inst,
4148                                   ValueType result_type,
4149                                   ValueType op1_type,
4150                                   ValueType op2_type,
4151                                   int sew,
4152                                   VReg op1_reg_class,
4153                                   DAGOperand op2_kind> :
4154  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4155                   (result_type (undef)),
4156                   (op1_type op1_reg_class:$rs1),
4157                   (op2_type op2_kind:$rs2),
4158                   (XLenVT timm:$round),
4159                   VLOpFrag)),
4160                   (!cast<Instruction>(inst)
4161                   (result_type (IMPLICIT_DEF)),
4162                   (op1_type op1_reg_class:$rs1),
4163                   (op2_type op2_kind:$rs2),
4164                   (XLenVT timm:$round),
4165                   GPR:$vl, sew, TA_MA)>;
4166
4167class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
4168                                     string inst,
4169                                     ValueType result_type,
4170                                     ValueType op1_type,
4171                                     ValueType op2_type,
4172                                     int sew,
4173                                     VReg result_reg_class,
4174                                     VReg op1_reg_class,
4175                                     DAGOperand op2_kind> :
4176  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4177                   (result_type result_reg_class:$merge),
4178                   (op1_type op1_reg_class:$rs1),
4179                   (op2_type op2_kind:$rs2),
4180                   (XLenVT timm:$round),
4181                   VLOpFrag)),
4182                   (!cast<Instruction>(inst)
4183                   (result_type result_reg_class:$merge),
4184                   (op1_type op1_reg_class:$rs1),
4185                   (op2_type op2_kind:$rs2),
4186                   (XLenVT timm:$round),
4187                   GPR:$vl, sew, TU_MU)>;
4188
4189
4190// Same as above but source operands are swapped.
4191class VPatBinaryNoMaskSwapped<string intrinsic_name,
4192                              string inst,
4193                              ValueType result_type,
4194                              ValueType op1_type,
4195                              ValueType op2_type,
4196                              int sew,
4197                              VReg op1_reg_class,
4198                              DAGOperand op2_kind> :
4199  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4200                   (op2_type op2_kind:$rs2),
4201                   (op1_type op1_reg_class:$rs1),
4202                   VLOpFrag)),
4203                   (!cast<Instruction>(inst)
4204                   (op1_type op1_reg_class:$rs1),
4205                   (op2_type op2_kind:$rs2),
4206                   GPR:$vl, sew)>;
4207
4208class VPatBinaryMask<string intrinsic_name,
4209                     string inst,
4210                     ValueType result_type,
4211                     ValueType op1_type,
4212                     ValueType op2_type,
4213                     ValueType mask_type,
4214                     int sew,
4215                     VReg result_reg_class,
4216                     VReg op1_reg_class,
4217                     DAGOperand op2_kind> :
4218  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4219                   (result_type result_reg_class:$merge),
4220                   (op1_type op1_reg_class:$rs1),
4221                   (op2_type op2_kind:$rs2),
4222                   (mask_type V0),
4223                   VLOpFrag)),
4224                   (!cast<Instruction>(inst#"_MASK")
4225                   (result_type result_reg_class:$merge),
4226                   (op1_type op1_reg_class:$rs1),
4227                   (op2_type op2_kind:$rs2),
4228                   (mask_type V0), GPR:$vl, sew)>;
4229
4230class VPatBinaryMaskTA<string intrinsic_name,
4231                       string inst,
4232                       ValueType result_type,
4233                       ValueType op1_type,
4234                       ValueType op2_type,
4235                       ValueType mask_type,
4236                       int sew,
4237                       VReg result_reg_class,
4238                       VReg op1_reg_class,
4239                       DAGOperand op2_kind> :
4240  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4241                   (result_type result_reg_class:$merge),
4242                   (op1_type op1_reg_class:$rs1),
4243                   (op2_type op2_kind:$rs2),
4244                   (mask_type V0),
4245                   VLOpFrag, (XLenVT timm:$policy))),
4246                   (!cast<Instruction>(inst#"_MASK")
4247                   (result_type result_reg_class:$merge),
4248                   (op1_type op1_reg_class:$rs1),
4249                   (op2_type op2_kind:$rs2),
4250                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4251
4252class VPatBinaryMaskTARoundingMode<string intrinsic_name,
4253                                   string inst,
4254                                   ValueType result_type,
4255                                   ValueType op1_type,
4256                                   ValueType op2_type,
4257                                   ValueType mask_type,
4258                                   int sew,
4259                                   VReg result_reg_class,
4260                                   VReg op1_reg_class,
4261                                   DAGOperand op2_kind> :
4262  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4263                   (result_type result_reg_class:$merge),
4264                   (op1_type op1_reg_class:$rs1),
4265                   (op2_type op2_kind:$rs2),
4266                   (mask_type V0),
4267                   (XLenVT timm:$round),
4268                   VLOpFrag, (XLenVT timm:$policy))),
4269                   (!cast<Instruction>(inst#"_MASK")
4270                   (result_type result_reg_class:$merge),
4271                   (op1_type op1_reg_class:$rs1),
4272                   (op2_type op2_kind:$rs2),
4273                   (mask_type V0),
4274                   (XLenVT timm:$round),
4275                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4276
4277// Same as above but source operands are swapped.
4278class VPatBinaryMaskSwapped<string intrinsic_name,
4279                            string inst,
4280                            ValueType result_type,
4281                            ValueType op1_type,
4282                            ValueType op2_type,
4283                            ValueType mask_type,
4284                            int sew,
4285                            VReg result_reg_class,
4286                            VReg op1_reg_class,
4287                            DAGOperand op2_kind> :
4288  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4289                   (result_type result_reg_class:$merge),
4290                   (op2_type op2_kind:$rs2),
4291                   (op1_type op1_reg_class:$rs1),
4292                   (mask_type V0),
4293                   VLOpFrag)),
4294                   (!cast<Instruction>(inst#"_MASK")
4295                   (result_type result_reg_class:$merge),
4296                   (op1_type op1_reg_class:$rs1),
4297                   (op2_type op2_kind:$rs2),
4298                   (mask_type V0), GPR:$vl, sew)>;
4299
4300class VPatTiedBinaryNoMask<string intrinsic_name,
4301                           string inst,
4302                           ValueType result_type,
4303                           ValueType op2_type,
4304                           int sew,
4305                           VReg result_reg_class,
4306                           DAGOperand op2_kind> :
4307  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4308                   (result_type (undef)),
4309                   (result_type result_reg_class:$rs1),
4310                   (op2_type op2_kind:$rs2),
4311                   VLOpFrag)),
4312                   (!cast<Instruction>(inst#"_TIED")
4313                   (result_type result_reg_class:$rs1),
4314                   (op2_type op2_kind:$rs2),
4315                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4316
4317class VPatTiedBinaryNoMaskRoundingMode<string intrinsic_name,
4318                                       string inst,
4319                                       ValueType result_type,
4320                                       ValueType op2_type,
4321                                       int sew,
4322                                       VReg result_reg_class,
4323                                       DAGOperand op2_kind> :
4324  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4325                   (result_type (undef)),
4326                   (result_type result_reg_class:$rs1),
4327                   (op2_type op2_kind:$rs2),
4328                   (XLenVT timm:$round),
4329                   VLOpFrag)),
4330                   (!cast<Instruction>(inst#"_TIED")
4331                   (result_type result_reg_class:$rs1),
4332                   (op2_type op2_kind:$rs2),
4333                   (XLenVT timm:$round),
4334                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4335
4336class VPatTiedBinaryNoMaskTU<string intrinsic_name,
4337                             string inst,
4338                             ValueType result_type,
4339                             ValueType op2_type,
4340                             int sew,
4341                             VReg result_reg_class,
4342                             DAGOperand op2_kind> :
4343  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4344                   (result_type result_reg_class:$merge),
4345                   (result_type result_reg_class:$merge),
4346                   (op2_type op2_kind:$rs2),
4347                   VLOpFrag)),
4348                   (!cast<Instruction>(inst#"_TIED")
4349                   (result_type result_reg_class:$merge),
4350                   (op2_type op2_kind:$rs2),
4351                   GPR:$vl, sew, TU_MU)>;
4352
4353class VPatTiedBinaryNoMaskTURoundingMode<string intrinsic_name,
4354                                         string inst,
4355                                         ValueType result_type,
4356                                         ValueType op2_type,
4357                                         int sew,
4358                                         VReg result_reg_class,
4359                                         DAGOperand op2_kind> :
4360  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4361                   (result_type result_reg_class:$merge),
4362                   (result_type result_reg_class:$merge),
4363                   (op2_type op2_kind:$rs2),
4364                   (XLenVT timm:$round),
4365                   VLOpFrag)),
4366                   (!cast<Instruction>(inst#"_TIED")
4367                   (result_type result_reg_class:$merge),
4368                   (op2_type op2_kind:$rs2),
4369                   (XLenVT timm:$round),
4370                   GPR:$vl, sew, TU_MU)>;
4371
4372class VPatTiedBinaryMask<string intrinsic_name,
4373                         string inst,
4374                         ValueType result_type,
4375                         ValueType op2_type,
4376                         ValueType mask_type,
4377                         int sew,
4378                         VReg result_reg_class,
4379                         DAGOperand op2_kind> :
4380  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4381                   (result_type result_reg_class:$merge),
4382                   (result_type result_reg_class:$merge),
4383                   (op2_type op2_kind:$rs2),
4384                   (mask_type V0),
4385                   VLOpFrag, (XLenVT timm:$policy))),
4386                   (!cast<Instruction>(inst#"_MASK_TIED")
4387                   (result_type result_reg_class:$merge),
4388                   (op2_type op2_kind:$rs2),
4389                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4390
4391class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
4392                                     string inst,
4393                                     ValueType result_type,
4394                                     ValueType op2_type,
4395                                     ValueType mask_type,
4396                                     int sew,
4397                                     VReg result_reg_class,
4398                                     DAGOperand op2_kind> :
4399  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4400                   (result_type result_reg_class:$merge),
4401                   (result_type result_reg_class:$merge),
4402                   (op2_type op2_kind:$rs2),
4403                   (mask_type V0),
4404                   (XLenVT timm:$round),
4405                   VLOpFrag, (XLenVT timm:$policy))),
4406                   (!cast<Instruction>(inst#"_MASK_TIED")
4407                   (result_type result_reg_class:$merge),
4408                   (op2_type op2_kind:$rs2),
4409                   (mask_type V0),
4410                   (XLenVT timm:$round),
4411                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4412
4413class VPatTernaryNoMask<string intrinsic,
4414                        string inst,
4415                        string kind,
4416                        ValueType result_type,
4417                        ValueType op1_type,
4418                        ValueType op2_type,
4419                        int sew,
4420                        LMULInfo vlmul,
4421                        VReg result_reg_class,
4422                        RegisterClass op1_reg_class,
4423                        DAGOperand op2_kind> :
4424  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4425                    (result_type result_reg_class:$rs3),
4426                    (op1_type op1_reg_class:$rs1),
4427                    (op2_type op2_kind:$rs2),
4428                    VLOpFrag)),
4429                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4430                    result_reg_class:$rs3,
4431                    (op1_type op1_reg_class:$rs1),
4432                    op2_kind:$rs2,
4433                    GPR:$vl, sew)>;
4434
4435class VPatTernaryNoMaskTA<string intrinsic,
4436                          string inst,
4437                          string kind,
4438                          ValueType result_type,
4439                          ValueType op1_type,
4440                          ValueType op2_type,
4441                          int log2sew,
4442                          LMULInfo vlmul,
4443                          VReg result_reg_class,
4444                          RegisterClass op1_reg_class,
4445                          DAGOperand op2_kind> :
4446  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4447                    (result_type result_reg_class:$rs3),
4448                    (op1_type op1_reg_class:$rs1),
4449                    (op2_type op2_kind:$rs2),
4450                    VLOpFrag)),
4451                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
4452                    result_reg_class:$rs3,
4453                    (op1_type op1_reg_class:$rs1),
4454                    op2_kind:$rs2,
4455                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4456
4457class VPatTernaryNoMaskTARoundingMode<string intrinsic,
4458                          string inst,
4459                          string kind,
4460                          ValueType result_type,
4461                          ValueType op1_type,
4462                          ValueType op2_type,
4463                          int log2sew,
4464                          LMULInfo vlmul,
4465                          VReg result_reg_class,
4466                          RegisterClass op1_reg_class,
4467                          DAGOperand op2_kind> :
4468  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4469                    (result_type result_reg_class:$rs3),
4470                    (op1_type op1_reg_class:$rs1),
4471                    (op2_type op2_kind:$rs2),
4472                    (XLenVT timm:$round),
4473                    VLOpFrag)),
4474                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
4475                    result_reg_class:$rs3,
4476                    (op1_type op1_reg_class:$rs1),
4477                    op2_kind:$rs2,
4478                    (XLenVT timm:$round),
4479                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4480
4481class VPatTernaryNoMaskWithPolicy<string intrinsic,
4482                                  string inst,
4483                                  string kind,
4484                                  ValueType result_type,
4485                                  ValueType op1_type,
4486                                  ValueType op2_type,
4487                                  int sew,
4488                                  LMULInfo vlmul,
4489                                  VReg result_reg_class,
4490                                  RegisterClass op1_reg_class,
4491                                  DAGOperand op2_kind> :
4492  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4493                    (result_type result_reg_class:$rs3),
4494                    (op1_type op1_reg_class:$rs1),
4495                    (op2_type op2_kind:$rs2),
4496                    VLOpFrag, (XLenVT timm:$policy))),
4497                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4498                    result_reg_class:$rs3,
4499                    (op1_type op1_reg_class:$rs1),
4500                    op2_kind:$rs2,
4501                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4502
4503class VPatTernaryNoMaskWithPolicyRoundingMode<string intrinsic,
4504                                  string inst,
4505                                  string kind,
4506                                  ValueType result_type,
4507                                  ValueType op1_type,
4508                                  ValueType op2_type,
4509                                  int sew,
4510                                  LMULInfo vlmul,
4511                                  VReg result_reg_class,
4512                                  RegisterClass op1_reg_class,
4513                                  DAGOperand op2_kind> :
4514  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4515                    (result_type result_reg_class:$rs3),
4516                    (op1_type op1_reg_class:$rs1),
4517                    (op2_type op2_kind:$rs2),
4518                    (XLenVT timm:$round),
4519                    VLOpFrag, (XLenVT timm:$policy))),
4520                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4521                    result_reg_class:$rs3,
4522                    (op1_type op1_reg_class:$rs1),
4523                    op2_kind:$rs2,
4524                    (XLenVT timm:$round),
4525                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4526
4527class VPatTernaryMask<string intrinsic,
4528                      string inst,
4529                      string kind,
4530                      ValueType result_type,
4531                      ValueType op1_type,
4532                      ValueType op2_type,
4533                      ValueType mask_type,
4534                      int sew,
4535                      LMULInfo vlmul,
4536                      VReg result_reg_class,
4537                      RegisterClass op1_reg_class,
4538                      DAGOperand op2_kind> :
4539  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4540                    (result_type result_reg_class:$rs3),
4541                    (op1_type op1_reg_class:$rs1),
4542                    (op2_type op2_kind:$rs2),
4543                    (mask_type V0),
4544                    VLOpFrag)),
4545                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
4546                    result_reg_class:$rs3,
4547                    (op1_type op1_reg_class:$rs1),
4548                    op2_kind:$rs2,
4549                    (mask_type V0),
4550                    GPR:$vl, sew)>;
4551
4552class VPatTernaryMaskPolicy<string intrinsic,
4553                            string inst,
4554                            string kind,
4555                            ValueType result_type,
4556                            ValueType op1_type,
4557                            ValueType op2_type,
4558                            ValueType mask_type,
4559                            int sew,
4560                            LMULInfo vlmul,
4561                            VReg result_reg_class,
4562                            RegisterClass op1_reg_class,
4563                            DAGOperand op2_kind> :
4564  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4565                    (result_type result_reg_class:$rs3),
4566                    (op1_type op1_reg_class:$rs1),
4567                    (op2_type op2_kind:$rs2),
4568                    (mask_type V0),
4569                    VLOpFrag, (XLenVT timm:$policy))),
4570                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
4571                    result_reg_class:$rs3,
4572                    (op1_type op1_reg_class:$rs1),
4573                    op2_kind:$rs2,
4574                    (mask_type V0),
4575                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4576
4577class VPatTernaryMaskPolicyRoundingMode<string intrinsic,
4578                                        string inst,
4579                                        string kind,
4580                                        ValueType result_type,
4581                                        ValueType op1_type,
4582                                        ValueType op2_type,
4583                                        ValueType mask_type,
4584                                        int sew,
4585                                        LMULInfo vlmul,
4586                                        VReg result_reg_class,
4587                                        RegisterClass op1_reg_class,
4588                                        DAGOperand op2_kind> :
4589  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4590                    (result_type result_reg_class:$rs3),
4591                    (op1_type op1_reg_class:$rs1),
4592                    (op2_type op2_kind:$rs2),
4593                    (mask_type V0),
4594                    (XLenVT timm:$round),
4595                    VLOpFrag, (XLenVT timm:$policy))),
4596                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
4597                    result_reg_class:$rs3,
4598                    (op1_type op1_reg_class:$rs1),
4599                    op2_kind:$rs2,
4600                    (mask_type V0),
4601                    (XLenVT timm:$round),
4602                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4603
4604class VPatTernaryMaskTA<string intrinsic,
4605                        string inst,
4606                        string kind,
4607                        ValueType result_type,
4608                        ValueType op1_type,
4609                        ValueType op2_type,
4610                        ValueType mask_type,
4611                        int log2sew,
4612                        LMULInfo vlmul,
4613                        VReg result_reg_class,
4614                        RegisterClass op1_reg_class,
4615                        DAGOperand op2_kind> :
4616  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4617                    (result_type result_reg_class:$rs3),
4618                    (op1_type op1_reg_class:$rs1),
4619                    (op2_type op2_kind:$rs2),
4620                    (mask_type V0),
4621                    VLOpFrag)),
4622                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
4623                    result_reg_class:$rs3,
4624                    (op1_type op1_reg_class:$rs1),
4625                    op2_kind:$rs2,
4626                    (mask_type V0),
4627                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4628
4629class VPatTernaryMaskTARoundingMode<string intrinsic,
4630                                    string inst,
4631                                    string kind,
4632                                    ValueType result_type,
4633                                    ValueType op1_type,
4634                                    ValueType op2_type,
4635                                    ValueType mask_type,
4636                                    int log2sew,
4637                                    LMULInfo vlmul,
4638                                    VReg result_reg_class,
4639                                    RegisterClass op1_reg_class,
4640                                    DAGOperand op2_kind> :
4641  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4642                    (result_type result_reg_class:$rs3),
4643                    (op1_type op1_reg_class:$rs1),
4644                    (op2_type op2_kind:$rs2),
4645                    (mask_type V0),
4646                    (XLenVT timm:$round),
4647                    VLOpFrag)),
4648                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
4649                    result_reg_class:$rs3,
4650                    (op1_type op1_reg_class:$rs1),
4651                    op2_kind:$rs2,
4652                    (mask_type V0),
4653                    (XLenVT timm:$round),
4654                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4655
4656multiclass VPatUnaryS_M<string intrinsic_name,
4657                             string inst> {
4658  foreach mti = AllMasks in {
4659    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
4660                      (mti.Mask VR:$rs1), VLOpFrag)),
4661                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
4662                      GPR:$vl, mti.Log2SEW)>;
4663    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
4664                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
4665                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
4666                      (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
4667  }
4668}
4669
4670multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
4671                                list<VTypeInfo> vtilist> {
4672  foreach vti = vtilist in {
4673    let Predicates = GetVTypePredicates<vti>.Predicates in
4674    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
4675                           vti.Vector, vti.Vector, vti.Mask,
4676                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
4677  }
4678}
4679
4680multiclass VPatUnaryM_M<string intrinsic,
4681                         string inst> {
4682  foreach mti = AllMasks in {
4683    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
4684    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
4685  }
4686}
4687
4688multiclass VPatUnaryV_M<string intrinsic, string instruction> {
4689  foreach vti = AllIntegerVectors in {
4690    let Predicates = GetVTypePredicates<vti>.Predicates in {
4691      def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
4692                            vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
4693      def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
4694                          vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
4695    }
4696  }
4697}
4698
4699multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
4700                         list<VTypeInfoToFraction> fractionList> {
4701  foreach vtiTofti = fractionList in {
4702      defvar vti = vtiTofti.Vti;
4703      defvar fti = vtiTofti.Fti;
4704      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
4705                                   GetVTypePredicates<fti>.Predicates) in {
4706        def : VPatUnaryNoMask<intrinsic, instruction, suffix,
4707                              vti.Vector, fti.Vector,
4708                              vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
4709        def : VPatUnaryMask<intrinsic, instruction, suffix,
4710                            vti.Vector, fti.Vector, vti.Mask,
4711                            vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
4712      }
4713  }
4714}
4715
4716multiclass VPatUnaryV_V<string intrinsic, string instruction,
4717                        list<VTypeInfo> vtilist> {
4718  foreach vti = vtilist in {
4719    let Predicates = GetVTypePredicates<vti>.Predicates in {
4720      def : VPatUnaryNoMask<intrinsic, instruction, "V",
4721                            vti.Vector, vti.Vector, vti.Log2SEW,
4722                            vti.LMul, vti.RegClass, vti.RegClass>;
4723      def : VPatUnaryMask<intrinsic, instruction, "V",
4724                          vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
4725                          vti.LMul, vti.RegClass, vti.RegClass>;
4726    }
4727  }
4728}
4729
4730multiclass VPatUnaryV_V_RM<string intrinsic, string instruction,
4731                        list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4732  foreach vti = vtilist in {
4733    let Predicates = GetVTypePredicates<vti>.Predicates in {
4734      def : VPatUnaryNoMaskRoundingMode<intrinsic, instruction, "V",
4735                                        vti.Vector, vti.Vector, vti.Log2SEW,
4736                                        vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
4737      def : VPatUnaryMaskRoundingMode<intrinsic, instruction, "V",
4738                                      vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
4739                                      vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
4740    }
4741  }
4742}
4743
4744multiclass VPatNullaryV<string intrinsic, string instruction> {
4745  foreach vti = AllIntegerVectors in {
4746    let Predicates = GetVTypePredicates<vti>.Predicates in {
4747      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
4748                            (vti.Vector vti.RegClass:$merge),
4749                            VLOpFrag)),
4750                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
4751                            vti.RegClass:$merge, GPR:$vl, vti.Log2SEW, TU_MU)>;
4752      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
4753                            (vti.Vector vti.RegClass:$merge),
4754                            (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
4755                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
4756                            vti.RegClass:$merge, (vti.Mask V0),
4757                            GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
4758  }
4759  }
4760}
4761
4762multiclass VPatNullaryM<string intrinsic, string inst> {
4763  foreach mti = AllMasks in
4764    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
4765                        VLOpFrag)),
4766                        (!cast<Instruction>(inst#"_M_"#mti.BX)
4767                        GPR:$vl, mti.Log2SEW)>;
4768}
4769
4770multiclass VPatBinaryM<string intrinsic,
4771                      string inst,
4772                      ValueType result_type,
4773                      ValueType op1_type,
4774                      ValueType op2_type,
4775                      ValueType mask_type,
4776                      int sew,
4777                      VReg result_reg_class,
4778                      VReg op1_reg_class,
4779                      DAGOperand op2_kind> {
4780  def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type,
4781                    sew, op1_reg_class, op2_kind>;
4782  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
4783                       mask_type, sew, result_reg_class, op1_reg_class,
4784                       op2_kind>;
4785}
4786
4787multiclass VPatBinary<string intrinsic,
4788                      string inst,
4789                      ValueType result_type,
4790                      ValueType op1_type,
4791                      ValueType op2_type,
4792                      ValueType mask_type,
4793                      int sew,
4794                      VReg result_reg_class,
4795                      VReg op1_reg_class,
4796                      DAGOperand op2_kind> {
4797  def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
4798                           sew, result_reg_class, op1_reg_class, op2_kind>;
4799  def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
4800                         mask_type, sew, result_reg_class, op1_reg_class,
4801                         op2_kind>;
4802}
4803
4804multiclass VPatBinaryRoundingMode<string intrinsic,
4805                                  string inst,
4806                                  ValueType result_type,
4807                                  ValueType op1_type,
4808                                  ValueType op2_type,
4809                                  ValueType mask_type,
4810                                  int sew,
4811                                  VReg result_reg_class,
4812                                  VReg op1_reg_class,
4813                                  DAGOperand op2_kind> {
4814  def : VPatBinaryNoMaskRoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
4815                                       sew, op1_reg_class, op2_kind>;
4816  def : VPatBinaryNoMaskTURoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
4817                                       sew, result_reg_class, op1_reg_class, op2_kind>;
4818  def : VPatBinaryMaskTARoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
4819                                     mask_type, sew, result_reg_class, op1_reg_class,
4820                                     op2_kind>;
4821}
4822
4823multiclass VPatBinarySwapped<string intrinsic,
4824                      string inst,
4825                      ValueType result_type,
4826                      ValueType op1_type,
4827                      ValueType op2_type,
4828                      ValueType mask_type,
4829                      int sew,
4830                      VReg result_reg_class,
4831                      VReg op1_reg_class,
4832                      DAGOperand op2_kind> {
4833  def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
4834                                sew, op1_reg_class, op2_kind>;
4835  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
4836                              mask_type, sew, result_reg_class, op1_reg_class,
4837                              op2_kind>;
4838}
4839
4840multiclass VPatBinaryCarryInTAIL<string intrinsic,
4841                                 string inst,
4842                                 string kind,
4843                                 ValueType result_type,
4844                                 ValueType op1_type,
4845                                 ValueType op2_type,
4846                                 ValueType mask_type,
4847                                 int sew,
4848                                 LMULInfo vlmul,
4849                                 VReg result_reg_class,
4850                                 VReg op1_reg_class,
4851                                 DAGOperand op2_kind> {
4852  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
4853                         (result_type result_reg_class:$merge),
4854                         (op1_type op1_reg_class:$rs1),
4855                         (op2_type op2_kind:$rs2),
4856                         (mask_type V0),
4857                         VLOpFrag)),
4858                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4859                         (result_type result_reg_class:$merge),
4860                         (op1_type op1_reg_class:$rs1),
4861                         (op2_type op2_kind:$rs2),
4862                         (mask_type V0), GPR:$vl, sew)>;
4863}
4864
4865multiclass VPatBinaryCarryIn<string intrinsic,
4866                             string inst,
4867                             string kind,
4868                             ValueType result_type,
4869                             ValueType op1_type,
4870                             ValueType op2_type,
4871                             ValueType mask_type,
4872                             int sew,
4873                             LMULInfo vlmul,
4874                             VReg op1_reg_class,
4875                             DAGOperand op2_kind> {
4876  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
4877                         (op1_type op1_reg_class:$rs1),
4878                         (op2_type op2_kind:$rs2),
4879                         (mask_type V0),
4880                         VLOpFrag)),
4881                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4882                         (op1_type op1_reg_class:$rs1),
4883                         (op2_type op2_kind:$rs2),
4884                         (mask_type V0), GPR:$vl, sew)>;
4885}
4886
4887multiclass VPatBinaryMaskOut<string intrinsic,
4888                             string inst,
4889                             string kind,
4890                             ValueType result_type,
4891                             ValueType op1_type,
4892                             ValueType op2_type,
4893                             int sew,
4894                             LMULInfo vlmul,
4895                             VReg op1_reg_class,
4896                             DAGOperand op2_kind> {
4897  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
4898                         (op1_type op1_reg_class:$rs1),
4899                         (op2_type op2_kind:$rs2),
4900                         VLOpFrag)),
4901                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4902                         (op1_type op1_reg_class:$rs1),
4903                         (op2_type op2_kind:$rs2),
4904                         GPR:$vl, sew)>;
4905}
4906
4907multiclass VPatConversionTA<string intrinsic,
4908                            string inst,
4909                            string kind,
4910                            ValueType result_type,
4911                            ValueType op1_type,
4912                            ValueType mask_type,
4913                            int sew,
4914                            LMULInfo vlmul,
4915                            VReg result_reg_class,
4916                            VReg op1_reg_class> {
4917  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
4918                        sew, vlmul, result_reg_class, op1_reg_class>;
4919  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type,
4920                      mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
4921}
4922
4923multiclass VPatConversionTARoundingMode<string intrinsic,
4924                                        string inst,
4925                                        string kind,
4926                                        ValueType result_type,
4927                                        ValueType op1_type,
4928                                        ValueType mask_type,
4929                                        int sew,
4930                                        LMULInfo vlmul,
4931                                        VReg result_reg_class,
4932                                        VReg op1_reg_class> {
4933  def : VPatUnaryNoMaskRoundingMode<intrinsic, inst, kind, result_type, op1_type,
4934                                    sew, vlmul, result_reg_class, op1_reg_class>;
4935  def : VPatUnaryMaskRoundingMode<intrinsic, inst, kind, result_type, op1_type,
4936                                  mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
4937}
4938
4939multiclass VPatBinaryV_VV<string intrinsic, string instruction,
4940                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4941  foreach vti = vtilist in
4942    let Predicates = GetVTypePredicates<vti>.Predicates in
4943    defm : VPatBinary<intrinsic,
4944                      !if(isSEWAware,
4945                          instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
4946                          instruction # "_VV_" # vti.LMul.MX),
4947                      vti.Vector, vti.Vector, vti.Vector,vti.Mask,
4948                      vti.Log2SEW, vti.RegClass,
4949                      vti.RegClass, vti.RegClass>;
4950}
4951
4952multiclass VPatBinaryV_VV_RM<string intrinsic, string instruction,
4953                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4954  foreach vti = vtilist in
4955    let Predicates = GetVTypePredicates<vti>.Predicates in
4956    defm : VPatBinaryRoundingMode<intrinsic,
4957                                  !if(isSEWAware,
4958                                      instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
4959                                      instruction # "_VV_" # vti.LMul.MX),
4960                                  vti.Vector, vti.Vector, vti.Vector,vti.Mask,
4961                                  vti.Log2SEW, vti.RegClass,
4962                                  vti.RegClass, vti.RegClass>;
4963}
4964
4965multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
4966                              list<VTypeInfo> vtilist> {
4967  foreach vti = vtilist in {
4968    defvar ivti = GetIntVTypeInfo<vti>.Vti;
4969    let Predicates = GetVTypePredicates<vti>.Predicates in
4970    defm : VPatBinary<intrinsic,
4971                      instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
4972                      vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
4973                      vti.Log2SEW, vti.RegClass,
4974                      vti.RegClass, vti.RegClass>;
4975  }
4976}
4977
4978multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
4979                                  int eew, list<VTypeInfo> vtilist> {
4980  foreach vti = vtilist in {
4981    // emul = lmul * eew / sew
4982    defvar vlmul = vti.LMul;
4983    defvar octuple_lmul = vlmul.octuple;
4984    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
4985    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
4986      defvar emul_str = octuple_to_str<octuple_emul>.ret;
4987      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
4988      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
4989      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
4990                                   GetVTypePredicates<ivti>.Predicates) in
4991      defm : VPatBinary<intrinsic, inst,
4992                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
4993                        vti.Log2SEW, vti.RegClass,
4994                        vti.RegClass, ivti.RegClass>;
4995    }
4996  }
4997}
4998
4999multiclass VPatBinaryV_VX<string intrinsic, string instruction,
5000                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
5001  foreach vti = vtilist in {
5002    defvar kind = "V"#vti.ScalarSuffix;
5003    let Predicates = GetVTypePredicates<vti>.Predicates in
5004    defm : VPatBinary<intrinsic,
5005                      !if(isSEWAware,
5006                          instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
5007                          instruction#"_"#kind#"_"#vti.LMul.MX),
5008                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
5009                      vti.Log2SEW, vti.RegClass,
5010                      vti.RegClass, vti.ScalarRegClass>;
5011  }
5012}
5013
5014multiclass VPatBinaryV_VX_RM<string intrinsic, string instruction,
5015                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
5016  foreach vti = vtilist in {
5017    defvar kind = "V"#vti.ScalarSuffix;
5018    let Predicates = GetVTypePredicates<vti>.Predicates in
5019    defm : VPatBinaryRoundingMode<intrinsic,
5020                                  !if(isSEWAware,
5021                                      instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
5022                                      instruction#"_"#kind#"_"#vti.LMul.MX),
5023                                  vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
5024                                  vti.Log2SEW, vti.RegClass,
5025                                  vti.RegClass, vti.ScalarRegClass>;
5026  }
5027}
5028
5029multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
5030                          list<VTypeInfo> vtilist> {
5031  foreach vti = vtilist in
5032    let Predicates = GetVTypePredicates<vti>.Predicates in
5033    defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
5034                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
5035                      vti.Log2SEW, vti.RegClass,
5036                      vti.RegClass, GPR>;
5037}
5038
5039multiclass VPatBinaryV_VI<string intrinsic, string instruction,
5040                          list<VTypeInfo> vtilist, Operand imm_type> {
5041  foreach vti = vtilist in
5042    let Predicates = GetVTypePredicates<vti>.Predicates in
5043    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
5044                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
5045                      vti.Log2SEW, vti.RegClass,
5046                      vti.RegClass, imm_type>;
5047}
5048
5049multiclass VPatBinaryV_VI_RM<string intrinsic, string instruction,
5050                             list<VTypeInfo> vtilist,
5051                             Operand imm_type> {
5052  foreach vti = vtilist in
5053    let Predicates = GetVTypePredicates<vti>.Predicates in
5054    defm : VPatBinaryRoundingMode<intrinsic,
5055                                  instruction # "_VI_" # vti.LMul.MX,
5056                                  vti.Vector, vti.Vector, XLenVT, vti.Mask,
5057                                  vti.Log2SEW, vti.RegClass,
5058                                  vti.RegClass, imm_type>;
5059}
5060
5061multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
5062  foreach mti = AllMasks in
5063    let Predicates = [HasVInstructions] in
5064    def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
5065                      mti.Mask, mti.Mask, mti.Mask,
5066                      mti.Log2SEW, VR, VR>;
5067}
5068
5069multiclass VPatBinaryW_VV<string intrinsic, string instruction,
5070                          list<VTypeInfoToWide> vtilist> {
5071  foreach VtiToWti = vtilist in {
5072    defvar Vti = VtiToWti.Vti;
5073    defvar Wti = VtiToWti.Wti;
5074    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5075                                 GetVTypePredicates<Wti>.Predicates) in
5076    defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
5077                      Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
5078                      Vti.Log2SEW, Wti.RegClass,
5079                      Vti.RegClass, Vti.RegClass>;
5080  }
5081}
5082
5083multiclass VPatBinaryW_VV_RM<string intrinsic, string instruction,
5084                             list<VTypeInfoToWide> vtilist> {
5085  foreach VtiToWti = vtilist in {
5086    defvar Vti = VtiToWti.Vti;
5087    defvar Wti = VtiToWti.Wti;
5088    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5089                                 GetVTypePredicates<Wti>.Predicates) in
5090    defm : VPatBinaryRoundingMode<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
5091                                  Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
5092                                  Vti.Log2SEW, Wti.RegClass,
5093                                  Vti.RegClass, Vti.RegClass>;
5094  }
5095}
5096
5097multiclass VPatBinaryW_VX<string intrinsic, string instruction,
5098                          list<VTypeInfoToWide> vtilist> {
5099  foreach VtiToWti = vtilist in {
5100    defvar Vti = VtiToWti.Vti;
5101    defvar Wti = VtiToWti.Wti;
5102    defvar kind = "V"#Vti.ScalarSuffix;
5103    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5104                                 GetVTypePredicates<Wti>.Predicates) in
5105    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5106                      Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
5107                      Vti.Log2SEW, Wti.RegClass,
5108                      Vti.RegClass, Vti.ScalarRegClass>;
5109  }
5110}
5111
5112multiclass VPatBinaryW_VX_RM<string intrinsic, string instruction,
5113                          list<VTypeInfoToWide> vtilist> {
5114  foreach VtiToWti = vtilist in {
5115    defvar Vti = VtiToWti.Vti;
5116    defvar Wti = VtiToWti.Wti;
5117    defvar kind = "V"#Vti.ScalarSuffix;
5118    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5119                                 GetVTypePredicates<Wti>.Predicates) in
5120    defm : VPatBinaryRoundingMode<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5121                                  Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
5122                                  Vti.Log2SEW, Wti.RegClass,
5123                                  Vti.RegClass, Vti.ScalarRegClass>;
5124  }
5125}
5126
5127multiclass VPatBinaryW_WV<string intrinsic, string instruction,
5128                          list<VTypeInfoToWide> vtilist> {
5129  foreach VtiToWti = vtilist in {
5130    defvar Vti = VtiToWti.Vti;
5131    defvar Wti = VtiToWti.Wti;
5132    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5133                                 GetVTypePredicates<Wti>.Predicates) in {
5134      def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5135                                 Wti.Vector, Vti.Vector,
5136                                 Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5137      def : VPatBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5138                               Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
5139                               Wti.RegClass, Wti.RegClass, Vti.RegClass>;
5140      let AddedComplexity = 1 in {
5141      def : VPatTiedBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5142                                   Wti.Vector, Vti.Vector,
5143                                   Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5144      def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5145                               Wti.Vector, Vti.Vector, Vti.Mask,
5146                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5147      }
5148      def : VPatBinaryMaskTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5149                             Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5150                             Vti.Log2SEW, Wti.RegClass,
5151                             Wti.RegClass, Vti.RegClass>;
5152    }
5153  }
5154}
5155
5156multiclass VPatBinaryW_WV_RM<string intrinsic, string instruction,
5157                             list<VTypeInfoToWide> vtilist> {
5158  foreach VtiToWti = vtilist in {
5159    defvar Vti = VtiToWti.Vti;
5160    defvar Wti = VtiToWti.Wti;
5161    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5162                                 GetVTypePredicates<Wti>.Predicates) in {
5163      def : VPatTiedBinaryNoMaskRoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5164                                             Wti.Vector, Vti.Vector,
5165                                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5166      def : VPatBinaryNoMaskTURoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5167                                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
5168                                           Wti.RegClass, Wti.RegClass, Vti.RegClass>;
5169      let AddedComplexity = 1 in {
5170      def : VPatTiedBinaryNoMaskTURoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5171                                               Wti.Vector, Vti.Vector,
5172                                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5173      def : VPatTiedBinaryMaskRoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5174                                           Wti.Vector, Vti.Vector, Vti.Mask,
5175                                           Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5176      }
5177      def : VPatBinaryMaskTARoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5178                                         Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5179                                         Vti.Log2SEW, Wti.RegClass,
5180                                         Wti.RegClass, Vti.RegClass>;
5181    }
5182  }
5183}
5184
5185multiclass VPatBinaryW_WX<string intrinsic, string instruction,
5186                          list<VTypeInfoToWide> vtilist> {
5187  foreach VtiToWti = vtilist in {
5188    defvar Vti = VtiToWti.Vti;
5189    defvar Wti = VtiToWti.Wti;
5190    defvar kind = "W"#Vti.ScalarSuffix;
5191    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5192                                 GetVTypePredicates<Wti>.Predicates) in
5193    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5194                      Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5195                      Vti.Log2SEW, Wti.RegClass,
5196                      Wti.RegClass, Vti.ScalarRegClass>;
5197  }
5198}
5199
5200multiclass VPatBinaryW_WX_RM<string intrinsic, string instruction,
5201                             list<VTypeInfoToWide> vtilist> {
5202  foreach VtiToWti = vtilist in {
5203    defvar Vti = VtiToWti.Vti;
5204    defvar Wti = VtiToWti.Wti;
5205    defvar kind = "W"#Vti.ScalarSuffix;
5206    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5207                                 GetVTypePredicates<Wti>.Predicates) in
5208    defm : VPatBinaryRoundingMode<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5209                                  Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5210                                  Vti.Log2SEW, Wti.RegClass,
5211                                  Wti.RegClass, Vti.ScalarRegClass>;
5212  }
5213}
5214
5215multiclass VPatBinaryV_WV<string intrinsic, string instruction,
5216                          list<VTypeInfoToWide> vtilist> {
5217  foreach VtiToWti = vtilist in {
5218    defvar Vti = VtiToWti.Vti;
5219    defvar Wti = VtiToWti.Wti;
5220    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5221                                 GetVTypePredicates<Wti>.Predicates) in
5222    defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5223                      Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5224                      Vti.Log2SEW, Vti.RegClass,
5225                      Wti.RegClass, Vti.RegClass>;
5226  }
5227}
5228
5229multiclass VPatBinaryV_WV_RM<string intrinsic, string instruction,
5230                             list<VTypeInfoToWide> vtilist> {
5231  foreach VtiToWti = vtilist in {
5232    defvar Vti = VtiToWti.Vti;
5233    defvar Wti = VtiToWti.Wti;
5234    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5235                                 GetVTypePredicates<Wti>.Predicates) in
5236    defm : VPatBinaryRoundingMode<intrinsic,
5237                                  instruction # "_WV_" # Vti.LMul.MX,
5238                                  Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5239                                  Vti.Log2SEW, Vti.RegClass,
5240                                  Wti.RegClass, Vti.RegClass>;
5241  }
5242}
5243
5244multiclass VPatBinaryV_WX<string intrinsic, string instruction,
5245                          list<VTypeInfoToWide> vtilist> {
5246  foreach VtiToWti = vtilist in {
5247    defvar Vti = VtiToWti.Vti;
5248    defvar Wti = VtiToWti.Wti;
5249    defvar kind = "W"#Vti.ScalarSuffix;
5250    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5251                                 GetVTypePredicates<Wti>.Predicates) in
5252    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5253                      Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5254                      Vti.Log2SEW, Vti.RegClass,
5255                      Wti.RegClass, Vti.ScalarRegClass>;
5256  }
5257}
5258
5259multiclass VPatBinaryV_WX_RM<string intrinsic, string instruction,
5260                             list<VTypeInfoToWide> vtilist> {
5261  foreach VtiToWti = vtilist in {
5262    defvar Vti = VtiToWti.Vti;
5263    defvar Wti = VtiToWti.Wti;
5264    defvar kind = "W"#Vti.ScalarSuffix;
5265    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5266                                 GetVTypePredicates<Wti>.Predicates) in
5267    defm : VPatBinaryRoundingMode<intrinsic,
5268                                  instruction#"_"#kind#"_"#Vti.LMul.MX,
5269                                  Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5270                                  Vti.Log2SEW, Vti.RegClass,
5271                                  Wti.RegClass, Vti.ScalarRegClass>;
5272  }
5273}
5274
5275
5276multiclass VPatBinaryV_WI<string intrinsic, string instruction,
5277                          list<VTypeInfoToWide> vtilist> {
5278  foreach VtiToWti = vtilist in {
5279    defvar Vti = VtiToWti.Vti;
5280    defvar Wti = VtiToWti.Wti;
5281    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5282                                 GetVTypePredicates<Wti>.Predicates) in
5283    defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
5284                      Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
5285                      Vti.Log2SEW, Vti.RegClass,
5286                      Wti.RegClass, uimm5>;
5287  }
5288}
5289
5290multiclass VPatBinaryV_WI_RM<string intrinsic, string instruction,
5291                             list<VTypeInfoToWide> vtilist> {
5292  foreach VtiToWti = vtilist in {
5293    defvar Vti = VtiToWti.Vti;
5294    defvar Wti = VtiToWti.Wti;
5295    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5296                                 GetVTypePredicates<Wti>.Predicates) in
5297    defm : VPatBinaryRoundingMode<intrinsic,
5298                                  instruction # "_WI_" # Vti.LMul.MX,
5299                                  Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
5300                                  Vti.Log2SEW, Vti.RegClass,
5301                                  Wti.RegClass, uimm5>;
5302  }
5303}
5304
5305multiclass VPatBinaryV_VM<string intrinsic, string instruction,
5306                          bit CarryOut = 0,
5307                          list<VTypeInfo> vtilist = AllIntegerVectors> {
5308  foreach vti = vtilist in
5309    let Predicates = GetVTypePredicates<vti>.Predicates in
5310    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
5311                             !if(CarryOut, vti.Mask, vti.Vector),
5312                             vti.Vector, vti.Vector, vti.Mask,
5313                             vti.Log2SEW, vti.LMul,
5314                             vti.RegClass, vti.RegClass>;
5315}
5316
5317multiclass VPatBinaryV_XM<string intrinsic, string instruction,
5318                          bit CarryOut = 0,
5319                          list<VTypeInfo> vtilist = AllIntegerVectors> {
5320  foreach vti = vtilist in
5321    let Predicates = GetVTypePredicates<vti>.Predicates in
5322    defm : VPatBinaryCarryIn<intrinsic, instruction,
5323                             "V"#vti.ScalarSuffix#"M",
5324                             !if(CarryOut, vti.Mask, vti.Vector),
5325                             vti.Vector, vti.Scalar, vti.Mask,
5326                             vti.Log2SEW, vti.LMul,
5327                             vti.RegClass, vti.ScalarRegClass>;
5328}
5329
5330multiclass VPatBinaryV_IM<string intrinsic, string instruction,
5331                          bit CarryOut = 0> {
5332  foreach vti = AllIntegerVectors in
5333    let Predicates = GetVTypePredicates<vti>.Predicates in
5334    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
5335                             !if(CarryOut, vti.Mask, vti.Vector),
5336                             vti.Vector, XLenVT, vti.Mask,
5337                             vti.Log2SEW, vti.LMul,
5338                             vti.RegClass, simm5>;
5339}
5340
5341multiclass VPatBinaryV_VM_TAIL<string intrinsic, string instruction> {
5342  foreach vti = AllIntegerVectors in
5343    let Predicates = GetVTypePredicates<vti>.Predicates in
5344    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VVM",
5345                                 vti.Vector,
5346                                 vti.Vector, vti.Vector, vti.Mask,
5347                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5348                                 vti.RegClass, vti.RegClass>;
5349}
5350
5351multiclass VPatBinaryV_XM_TAIL<string intrinsic, string instruction> {
5352  foreach vti = AllIntegerVectors in
5353    let Predicates = GetVTypePredicates<vti>.Predicates in
5354    defm : VPatBinaryCarryInTAIL<intrinsic, instruction,
5355                                 "V"#vti.ScalarSuffix#"M",
5356                                 vti.Vector,
5357                                 vti.Vector, vti.Scalar, vti.Mask,
5358                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5359                                 vti.RegClass, vti.ScalarRegClass>;
5360}
5361
5362multiclass VPatBinaryV_IM_TAIL<string intrinsic, string instruction> {
5363  foreach vti = AllIntegerVectors in
5364    let Predicates = GetVTypePredicates<vti>.Predicates in
5365    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VIM",
5366                                 vti.Vector,
5367                                 vti.Vector, XLenVT, vti.Mask,
5368                                 vti.Log2SEW, vti.LMul,
5369                                 vti.RegClass, vti.RegClass, simm5>;
5370}
5371
5372multiclass VPatBinaryV_V<string intrinsic, string instruction> {
5373  foreach vti = AllIntegerVectors in
5374    let Predicates = GetVTypePredicates<vti>.Predicates in
5375    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
5376                             vti.Mask, vti.Vector, vti.Vector,
5377                             vti.Log2SEW, vti.LMul,
5378                             vti.RegClass, vti.RegClass>;
5379}
5380
5381multiclass VPatBinaryV_X<string intrinsic, string instruction> {
5382  foreach vti = AllIntegerVectors in
5383    let Predicates = GetVTypePredicates<vti>.Predicates in
5384    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
5385                             vti.Mask, vti.Vector, XLenVT,
5386                             vti.Log2SEW, vti.LMul,
5387                             vti.RegClass, GPR>;
5388}
5389
5390multiclass VPatBinaryV_I<string intrinsic, string instruction> {
5391  foreach vti = AllIntegerVectors in
5392    let Predicates = GetVTypePredicates<vti>.Predicates in
5393    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
5394                             vti.Mask, vti.Vector, XLenVT,
5395                             vti.Log2SEW, vti.LMul,
5396                             vti.RegClass, simm5>;
5397}
5398
5399multiclass VPatBinaryM_VV<string intrinsic, string instruction,
5400                          list<VTypeInfo> vtilist> {
5401  foreach vti = vtilist in
5402    let Predicates = GetVTypePredicates<vti>.Predicates in
5403    defm : VPatBinaryM<intrinsic, instruction # "_VV_" # vti.LMul.MX,
5404                       vti.Mask, vti.Vector, vti.Vector, vti.Mask,
5405                       vti.Log2SEW, VR,
5406                       vti.RegClass, vti.RegClass>;
5407}
5408
5409multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
5410                                 list<VTypeInfo> vtilist> {
5411  foreach vti = vtilist in
5412    let Predicates = GetVTypePredicates<vti>.Predicates in
5413    defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
5414                             vti.Mask, vti.Vector, vti.Vector, vti.Mask,
5415                             vti.Log2SEW, VR,
5416                             vti.RegClass, vti.RegClass>;
5417}
5418
5419multiclass VPatBinaryM_VX<string intrinsic, string instruction,
5420                          list<VTypeInfo> vtilist> {
5421  foreach vti = vtilist in {
5422    defvar kind = "V"#vti.ScalarSuffix;
5423    let Predicates = GetVTypePredicates<vti>.Predicates in
5424    defm : VPatBinaryM<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
5425                       vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
5426                       vti.Log2SEW, VR,
5427                       vti.RegClass, vti.ScalarRegClass>;
5428  }
5429}
5430
5431multiclass VPatBinaryM_VI<string intrinsic, string instruction,
5432                          list<VTypeInfo> vtilist> {
5433  foreach vti = vtilist in
5434    let Predicates = GetVTypePredicates<vti>.Predicates in
5435    defm : VPatBinaryM<intrinsic, instruction # "_VI_" # vti.LMul.MX,
5436                       vti.Mask, vti.Vector, XLenVT, vti.Mask,
5437                       vti.Log2SEW, VR,
5438                       vti.RegClass, simm5>;
5439}
5440
5441multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
5442                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
5443    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
5444      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
5445      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
5446
5447multiclass VPatBinaryV_VV_VX_VI_RM<string intrinsic, string instruction,
5448                                   list<VTypeInfo> vtilist, Operand ImmType = simm5>
5449    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist>,
5450      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist>,
5451      VPatBinaryV_VI_RM<intrinsic, instruction, vtilist, ImmType>;
5452
5453multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
5454                             list<VTypeInfo> vtilist, bit isSEWAware = 0>
5455    : VPatBinaryV_VV<intrinsic, instruction, vtilist, isSEWAware>,
5456      VPatBinaryV_VX<intrinsic, instruction, vtilist, isSEWAware>;
5457
5458multiclass VPatBinaryV_VV_VX_RM<string intrinsic, string instruction,
5459                                list<VTypeInfo> vtilist, bit isSEWAware = 0>
5460    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
5461      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;
5462
5463multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
5464                             list<VTypeInfo> vtilist>
5465    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
5466      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
5467
5468multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
5469                             list<VTypeInfoToWide> vtilist>
5470    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
5471      VPatBinaryW_VX<intrinsic, instruction, vtilist>;
5472
5473multiclass VPatBinaryW_VV_VX_RM<string intrinsic, string instruction,
5474                                list<VTypeInfoToWide> vtilist>
5475    : VPatBinaryW_VV_RM<intrinsic, instruction, vtilist>,
5476      VPatBinaryW_VX_RM<intrinsic, instruction, vtilist>;
5477
5478multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
5479                             list<VTypeInfoToWide> vtilist>
5480    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
5481      VPatBinaryW_WX<intrinsic, instruction, vtilist>;
5482
5483multiclass VPatBinaryW_WV_WX_RM<string intrinsic, string instruction,
5484                                list<VTypeInfoToWide> vtilist>
5485    : VPatBinaryW_WV_RM<intrinsic, instruction, vtilist>,
5486      VPatBinaryW_WX_RM<intrinsic, instruction, vtilist>;
5487
5488multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
5489                                list<VTypeInfoToWide> vtilist>
5490    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
5491      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
5492      VPatBinaryV_WI<intrinsic, instruction, vtilist>;
5493
5494multiclass VPatBinaryV_WV_WX_WI_RM<string intrinsic, string instruction,
5495                                   list<VTypeInfoToWide> vtilist>
5496    : VPatBinaryV_WV_RM<intrinsic, instruction, vtilist>,
5497      VPatBinaryV_WX_RM<intrinsic, instruction, vtilist>,
5498      VPatBinaryV_WI_RM<intrinsic, instruction, vtilist>;
5499
5500multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
5501    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
5502      VPatBinaryV_XM_TAIL<intrinsic, instruction>,
5503      VPatBinaryV_IM_TAIL<intrinsic, instruction>;
5504
5505multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
5506    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
5507      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>,
5508      VPatBinaryV_IM<intrinsic, instruction, CarryOut=1>;
5509
5510multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
5511    : VPatBinaryV_V<intrinsic, instruction>,
5512      VPatBinaryV_X<intrinsic, instruction>,
5513      VPatBinaryV_I<intrinsic, instruction>;
5514
5515multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
5516    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
5517      VPatBinaryV_XM_TAIL<intrinsic, instruction>;
5518
5519multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
5520    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
5521      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>;
5522
5523multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
5524    : VPatBinaryV_V<intrinsic, instruction>,
5525      VPatBinaryV_X<intrinsic, instruction>;
5526
5527multiclass VPatTernary<string intrinsic,
5528                       string inst,
5529                       string kind,
5530                       ValueType result_type,
5531                       ValueType op1_type,
5532                       ValueType op2_type,
5533                       ValueType mask_type,
5534                       int sew,
5535                       LMULInfo vlmul,
5536                       VReg result_reg_class,
5537                       RegisterClass op1_reg_class,
5538                       DAGOperand op2_kind> {
5539  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
5540                          sew, vlmul, result_reg_class, op1_reg_class,
5541                          op2_kind>;
5542  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
5543                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
5544                        op2_kind>;
5545}
5546
5547multiclass VPatTernaryNoMaskNoPolicy<string intrinsic,
5548                                     string inst,
5549                                     string kind,
5550                                     ValueType result_type,
5551                                     ValueType op1_type,
5552                                     ValueType op2_type,
5553                                     ValueType mask_type,
5554                                     int sew,
5555                                     LMULInfo vlmul,
5556                                     VReg result_reg_class,
5557                                     RegisterClass op1_reg_class,
5558                                     DAGOperand op2_kind> {
5559  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
5560                          sew, vlmul, result_reg_class, op1_reg_class,
5561                          op2_kind>;
5562  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
5563                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
5564                              op2_kind>;
5565}
5566
5567multiclass VPatTernaryWithPolicy<string intrinsic,
5568                                 string inst,
5569                                 string kind,
5570                                 ValueType result_type,
5571                                 ValueType op1_type,
5572                                 ValueType op2_type,
5573                                 ValueType mask_type,
5574                                 int sew,
5575                                 LMULInfo vlmul,
5576                                 VReg result_reg_class,
5577                                 RegisterClass op1_reg_class,
5578                                 DAGOperand op2_kind> {
5579  def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
5580                                    op2_type, sew, vlmul, result_reg_class,
5581                                    op1_reg_class, op2_kind>;
5582  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
5583                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
5584                              op2_kind>;
5585}
5586
5587multiclass VPatTernaryWithPolicyRoundingMode<string intrinsic,
5588                                             string inst,
5589                                             string kind,
5590                                             ValueType result_type,
5591                                             ValueType op1_type,
5592                                             ValueType op2_type,
5593                                             ValueType mask_type,
5594                                             int sew,
5595                                             LMULInfo vlmul,
5596                                             VReg result_reg_class,
5597                                             RegisterClass op1_reg_class,
5598                                             DAGOperand op2_kind> {
5599  def : VPatTernaryNoMaskWithPolicyRoundingMode<intrinsic, inst, kind, result_type,
5600                                                op1_type, op2_type, sew, vlmul,
5601                                                result_reg_class, op1_reg_class,
5602                                                op2_kind>;
5603  def : VPatTernaryMaskPolicyRoundingMode<intrinsic, inst, kind, result_type, op1_type,
5604                                                op2_type, mask_type, sew, vlmul,
5605                                                result_reg_class, op1_reg_class,
5606                                                op2_kind>;
5607}
5608
5609multiclass VPatTernaryTA<string intrinsic,
5610                         string inst,
5611                         string kind,
5612                         ValueType result_type,
5613                         ValueType op1_type,
5614                         ValueType op2_type,
5615                         ValueType mask_type,
5616                         int log2sew,
5617                         LMULInfo vlmul,
5618                         VReg result_reg_class,
5619                         RegisterClass op1_reg_class,
5620                         DAGOperand op2_kind> {
5621  def : VPatTernaryNoMaskTA<intrinsic, inst, kind, result_type, op1_type,
5622                            op2_type, log2sew, vlmul, result_reg_class,
5623                            op1_reg_class, op2_kind>;
5624  def : VPatTernaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
5625                          op2_type, mask_type, log2sew, vlmul,
5626                          result_reg_class, op1_reg_class, op2_kind>;
5627}
5628
5629multiclass VPatTernaryTARoundingMode<string intrinsic,
5630                                     string inst,
5631                                     string kind,
5632                                     ValueType result_type,
5633                                     ValueType op1_type,
5634                                     ValueType op2_type,
5635                                     ValueType mask_type,
5636                                     int log2sew,
5637                                     LMULInfo vlmul,
5638                                     VReg result_reg_class,
5639                                     RegisterClass op1_reg_class,
5640                                     DAGOperand op2_kind> {
5641  def : VPatTernaryNoMaskTARoundingMode<intrinsic, inst, kind, result_type, op1_type,
5642                            op2_type, log2sew, vlmul, result_reg_class,
5643                            op1_reg_class, op2_kind>;
5644  def : VPatTernaryMaskTARoundingMode<intrinsic, inst, kind, result_type, op1_type,
5645                          op2_type, mask_type, log2sew, vlmul,
5646                          result_reg_class, op1_reg_class, op2_kind>;
5647}
5648
5649multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
5650                                list<VTypeInfo> vtilist> {
5651  foreach vti = vtilist in
5652    let Predicates = GetVTypePredicates<vti>.Predicates in
5653    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
5654                                 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
5655                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5656                                 vti.RegClass, vti.RegClass>;
5657}
5658
5659multiclass VPatTernaryV_VV_AAXA_RM<string intrinsic, string instruction,
5660                                list<VTypeInfo> vtilist> {
5661  foreach vti = vtilist in
5662    let Predicates = GetVTypePredicates<vti>.Predicates in
5663    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
5664                                             vti.Vector, vti.Vector, vti.Vector, vti.Mask,
5665                                             vti.Log2SEW, vti.LMul, vti.RegClass,
5666                                             vti.RegClass, vti.RegClass>;
5667}
5668
5669multiclass VPatTernaryV_VX<string intrinsic, string instruction,
5670                           list<VTypeInfo> vtilist> {
5671  foreach vti = vtilist in
5672    let Predicates = GetVTypePredicates<vti>.Predicates in
5673    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
5674                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
5675                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5676                                 vti.RegClass, GPR>;
5677}
5678
5679multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
5680                           list<VTypeInfo> vtilist> {
5681  foreach vti = vtilist in
5682    let Predicates = GetVTypePredicates<vti>.Predicates in
5683    defm : VPatTernaryWithPolicy<intrinsic, instruction,
5684                                 "V"#vti.ScalarSuffix,
5685                                 vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
5686                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5687                                 vti.ScalarRegClass, vti.RegClass>;
5688}
5689
5690multiclass VPatTernaryV_VX_AAXA_RM<string intrinsic, string instruction,
5691                           list<VTypeInfo> vtilist> {
5692  foreach vti = vtilist in
5693    let Predicates = GetVTypePredicates<vti>.Predicates in
5694    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction,
5695                                             "V"#vti.ScalarSuffix,
5696                                             vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
5697                                             vti.Log2SEW, vti.LMul, vti.RegClass,
5698                                             vti.ScalarRegClass, vti.RegClass>;
5699}
5700
5701multiclass VPatTernaryV_VI<string intrinsic, string instruction,
5702                           list<VTypeInfo> vtilist, Operand Imm_type> {
5703  foreach vti = vtilist in
5704    let Predicates = GetVTypePredicates<vti>.Predicates in
5705    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
5706                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
5707                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5708                                 vti.RegClass, Imm_type>;
5709}
5710
5711multiclass VPatTernaryW_VV<string intrinsic, string instruction,
5712                           list<VTypeInfoToWide> vtilist> {
5713  foreach vtiToWti = vtilist in {
5714    defvar vti = vtiToWti.Vti;
5715    defvar wti = vtiToWti.Wti;
5716    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5717                                 GetVTypePredicates<wti>.Predicates) in
5718    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
5719                                 wti.Vector, vti.Vector, vti.Vector,
5720                                 vti.Mask, vti.Log2SEW, vti.LMul,
5721                                 wti.RegClass, vti.RegClass, vti.RegClass>;
5722  }
5723}
5724
5725multiclass VPatTernaryW_VV_RM<string intrinsic, string instruction,
5726                           list<VTypeInfoToWide> vtilist> {
5727  foreach vtiToWti = vtilist in {
5728    defvar vti = vtiToWti.Vti;
5729    defvar wti = vtiToWti.Wti;
5730    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5731                                 GetVTypePredicates<wti>.Predicates) in
5732    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
5733                                             wti.Vector, vti.Vector, vti.Vector,
5734                                             vti.Mask, vti.Log2SEW, vti.LMul,
5735                                             wti.RegClass, vti.RegClass, vti.RegClass>;
5736  }
5737}
5738
5739multiclass VPatTernaryW_VX<string intrinsic, string instruction,
5740                           list<VTypeInfoToWide> vtilist> {
5741  foreach vtiToWti = vtilist in {
5742    defvar vti = vtiToWti.Vti;
5743    defvar wti = vtiToWti.Wti;
5744    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5745                                 GetVTypePredicates<wti>.Predicates) in
5746    defm : VPatTernaryWithPolicy<intrinsic, instruction,
5747                                 "V"#vti.ScalarSuffix,
5748                                 wti.Vector, vti.Scalar, vti.Vector,
5749                                 vti.Mask, vti.Log2SEW, vti.LMul,
5750                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
5751  }
5752}
5753
5754multiclass VPatTernaryW_VX_RM<string intrinsic, string instruction,
5755                           list<VTypeInfoToWide> vtilist> {
5756  foreach vtiToWti = vtilist in {
5757    defvar vti = vtiToWti.Vti;
5758    defvar wti = vtiToWti.Wti;
5759    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5760                                 GetVTypePredicates<wti>.Predicates) in
5761    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction,
5762                                             "V"#vti.ScalarSuffix,
5763                                             wti.Vector, vti.Scalar, vti.Vector,
5764                                             vti.Mask, vti.Log2SEW, vti.LMul,
5765                                             wti.RegClass, vti.ScalarRegClass,
5766                                             vti.RegClass>;
5767  }
5768}
5769
5770multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
5771                              list<VTypeInfo> vtilist>
5772    : VPatTernaryV_VV_AAXA<intrinsic, instruction, vtilist>,
5773      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;
5774
5775multiclass VPatTernaryV_VV_VX_AAXA_RM<string intrinsic, string instruction,
5776                              list<VTypeInfo> vtilist>
5777    : VPatTernaryV_VV_AAXA_RM<intrinsic, instruction, vtilist>,
5778      VPatTernaryV_VX_AAXA_RM<intrinsic, instruction, vtilist>;
5779
5780multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
5781                              list<VTypeInfo> vtilist, Operand Imm_type = simm5>
5782    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
5783      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
5784
5785
5786multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
5787                                list<VTypeInfo> vtilist>
5788    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
5789      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
5790      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5791
5792multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
5793                              list<VTypeInfoToWide> vtilist>
5794    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
5795      VPatTernaryW_VX<intrinsic, instruction, vtilist>;
5796
5797multiclass VPatTernaryW_VV_VX_RM<string intrinsic, string instruction,
5798                              list<VTypeInfoToWide> vtilist>
5799    : VPatTernaryW_VV_RM<intrinsic, instruction, vtilist>,
5800      VPatTernaryW_VX_RM<intrinsic, instruction, vtilist>;
5801
5802multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
5803                             list<VTypeInfo> vtilist>
5804    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
5805      VPatBinaryM_VX<intrinsic, instruction, vtilist>;
5806
5807multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
5808                             list<VTypeInfo> vtilist>
5809    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
5810      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5811
5812multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
5813                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
5814    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
5815      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
5816      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
5817
5818multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
5819  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
5820    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
5821    let Predicates = GetVTypePredicates<vti>.Predicates in
5822    defm : VPatTernaryTA<intrinsic, instruction, "VS",
5823                         vectorM1.Vector, vti.Vector,
5824                         vectorM1.Vector, vti.Mask,
5825                         vti.Log2SEW, vti.LMul,
5826                         VR, vti.RegClass, VR>;
5827  }
5828  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
5829    let Predicates = GetVTypePredicates<gvti>.Predicates in
5830    defm : VPatTernaryTA<intrinsic, instruction, "VS",
5831                         gvti.VectorM1, gvti.Vector,
5832                         gvti.VectorM1, gvti.Mask,
5833                         gvti.Log2SEW, gvti.LMul,
5834                         VR, gvti.RegClass, VR>;
5835  }
5836}
5837
5838multiclass VPatReductionV_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
5839  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
5840    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
5841    let Predicates = GetVTypePredicates<vti>.Predicates in
5842    defm : VPatTernaryTARoundingMode<intrinsic, instruction, "VS",
5843                                     vectorM1.Vector, vti.Vector,
5844                                     vectorM1.Vector, vti.Mask,
5845                                     vti.Log2SEW, vti.LMul,
5846                                     VR, vti.RegClass, VR>;
5847  }
5848  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
5849    let Predicates = GetVTypePredicates<gvti>.Predicates in
5850    defm : VPatTernaryTARoundingMode<intrinsic, instruction, "VS",
5851                                     gvti.VectorM1, gvti.Vector,
5852                                     gvti.VectorM1, gvti.Mask,
5853                                     gvti.Log2SEW, gvti.LMul,
5854                                     VR, gvti.RegClass, VR>;
5855  }
5856}
5857
5858multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
5859  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
5860    defvar wtiSEW = !mul(vti.SEW, 2);
5861    if !le(wtiSEW, 64) then {
5862      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
5863      let Predicates = GetVTypePredicates<vti>.Predicates in
5864      defm : VPatTernaryTA<intrinsic, instruction, "VS",
5865                           wtiM1.Vector, vti.Vector,
5866                           wtiM1.Vector, vti.Mask,
5867                           vti.Log2SEW, vti.LMul,
5868                           wtiM1.RegClass, vti.RegClass,
5869                           wtiM1.RegClass>;
5870    }
5871  }
5872}
5873
5874multiclass VPatReductionW_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
5875  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
5876    defvar wtiSEW = !mul(vti.SEW, 2);
5877    if !le(wtiSEW, 64) then {
5878      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
5879      let Predicates = GetVTypePredicates<vti>.Predicates in
5880      defm : VPatTernaryTARoundingMode<intrinsic, instruction, "VS",
5881                                       wtiM1.Vector, vti.Vector,
5882                                       wtiM1.Vector, vti.Mask,
5883                                       vti.Log2SEW, vti.LMul,
5884                                       wtiM1.RegClass, vti.RegClass,
5885                                       wtiM1.RegClass>;
5886    }
5887  }
5888}
5889
5890multiclass VPatConversionVI_VF<string intrinsic,
5891                               string instruction> {
5892  foreach fvti = AllFloatVectors in {
5893    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
5894    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5895                                 GetVTypePredicates<ivti>.Predicates) in
5896    defm : VPatConversionTA<intrinsic, instruction, "V",
5897                            ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
5898                            fvti.LMul, ivti.RegClass, fvti.RegClass>;
5899  }
5900}
5901
5902multiclass VPatConversionVI_VF_RM<string intrinsic,
5903                                  string instruction> {
5904  foreach fvti = AllFloatVectors in {
5905    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
5906    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5907                                 GetVTypePredicates<ivti>.Predicates) in
5908    defm : VPatConversionTARoundingMode<intrinsic, instruction, "V",
5909                                        ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
5910                                        fvti.LMul, ivti.RegClass, fvti.RegClass>;
5911  }
5912}
5913
5914multiclass VPatConversionVF_VI_RM<string intrinsic,
5915                                  string instruction> {
5916  foreach fvti = AllFloatVectors in {
5917    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
5918    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5919                                 GetVTypePredicates<ivti>.Predicates) in
5920    defm : VPatConversionTARoundingMode<intrinsic, instruction, "V",
5921                                        fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
5922                                        ivti.LMul, fvti.RegClass, ivti.RegClass>;
5923  }
5924}
5925
5926multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
5927  foreach fvtiToFWti = AllWidenableFloatVectors in {
5928    defvar fvti = fvtiToFWti.Vti;
5929    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
5930    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5931                                 GetVTypePredicates<iwti>.Predicates) in
5932    defm : VPatConversionTA<intrinsic, instruction, "V",
5933                            iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
5934                            fvti.LMul, iwti.RegClass, fvti.RegClass>;
5935  }
5936}
5937
5938multiclass VPatConversionWI_VF_RM<string intrinsic, string instruction> {
5939  foreach fvtiToFWti = AllWidenableFloatVectors in {
5940    defvar fvti = fvtiToFWti.Vti;
5941    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
5942    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5943                                 GetVTypePredicates<iwti>.Predicates) in
5944    defm : VPatConversionTARoundingMode<intrinsic, instruction, "V",
5945                                        iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
5946                                        fvti.LMul, iwti.RegClass, fvti.RegClass>;
5947  }
5948}
5949
5950multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
5951  foreach vtiToWti = AllWidenableIntToFloatVectors in {
5952    defvar vti = vtiToWti.Vti;
5953    defvar fwti = vtiToWti.Wti;
5954    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5955                                 GetVTypePredicates<fwti>.Predicates) in
5956    defm : VPatConversionTA<intrinsic, instruction, "V",
5957                            fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
5958                            vti.LMul, fwti.RegClass, vti.RegClass>;
5959  }
5960}
5961
5962multiclass VPatConversionWF_VF<string intrinsic, string instruction> {
5963  foreach fvtiToFWti = AllWidenableFloatVectors in {
5964    defvar fvti = fvtiToFWti.Vti;
5965    defvar fwti = fvtiToFWti.Wti;
5966    // Define vfwcvt.f.f.v for f16 when Zvfhmin is enable.
5967    let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
5968                         !listconcat(GetVTypePredicates<fvti>.Predicates,
5969                                     GetVTypePredicates<fwti>.Predicates)) in
5970      defm : VPatConversionTA<intrinsic, instruction, "V",
5971                              fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
5972                              fvti.LMul, fwti.RegClass, fvti.RegClass>;
5973  }
5974}
5975
5976multiclass VPatConversionWF_VF_BF <string intrinsic, string instruction> {
5977  foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in
5978  {
5979    defvar fvti = fvtiToFWti.Vti;
5980    defvar fwti = fvtiToFWti.Wti;
5981    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5982                                 GetVTypePredicates<fwti>.Predicates) in
5983    defm : VPatConversionTA<intrinsic, instruction, "V",
5984                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
5985                            fvti.LMul, fwti.RegClass, fvti.RegClass>;
5986  }
5987}
5988
5989multiclass VPatConversionVI_WF <string intrinsic, string instruction> {
5990  foreach vtiToWti = AllWidenableIntToFloatVectors in {
5991    defvar vti = vtiToWti.Vti;
5992    defvar fwti = vtiToWti.Wti;
5993    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5994                                 GetVTypePredicates<fwti>.Predicates) in
5995    defm : VPatConversionTA<intrinsic, instruction, "W",
5996                            vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
5997                            vti.LMul, vti.RegClass, fwti.RegClass>;
5998  }
5999}
6000
6001multiclass VPatConversionVI_WF_RM <string intrinsic, string instruction> {
6002  foreach vtiToWti = AllWidenableIntToFloatVectors in {
6003    defvar vti = vtiToWti.Vti;
6004    defvar fwti = vtiToWti.Wti;
6005    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
6006                                 GetVTypePredicates<fwti>.Predicates) in
6007    defm : VPatConversionTARoundingMode<intrinsic, instruction, "W",
6008                                        vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
6009                                        vti.LMul, vti.RegClass, fwti.RegClass>;
6010  }
6011}
6012
6013multiclass VPatConversionVF_WI_RM <string intrinsic, string instruction> {
6014  foreach fvtiToFWti = AllWidenableFloatVectors in {
6015    defvar fvti = fvtiToFWti.Vti;
6016    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
6017    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
6018                                 GetVTypePredicates<iwti>.Predicates) in
6019    defm : VPatConversionTARoundingMode<intrinsic, instruction, "W",
6020                                        fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
6021                                        fvti.LMul, fvti.RegClass, iwti.RegClass>;
6022  }
6023}
6024
6025multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
6026  foreach fvtiToFWti = AllWidenableFloatVectors in {
6027    defvar fvti = fvtiToFWti.Vti;
6028    defvar fwti = fvtiToFWti.Wti;
6029    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
6030                                 GetVTypePredicates<fwti>.Predicates) in
6031    defm : VPatConversionTA<intrinsic, instruction, "W",
6032                            fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
6033                            fvti.LMul, fvti.RegClass, fwti.RegClass>;
6034  }
6035}
6036
6037multiclass VPatConversionVF_WF_RM <string intrinsic, string instruction,
6038                                   list<VTypeInfoToWide> wlist = AllWidenableFloatVectors> {
6039  foreach fvtiToFWti = wlist in {
6040    defvar fvti = fvtiToFWti.Vti;
6041    defvar fwti = fvtiToFWti.Wti;
6042    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
6043                                 GetVTypePredicates<fwti>.Predicates) in
6044    defm : VPatConversionTARoundingMode<intrinsic, instruction, "W",
6045                                        fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
6046                                        fvti.LMul, fvti.RegClass, fwti.RegClass>;
6047  }
6048}
6049
6050multiclass VPatConversionVF_WF_BF_RM <string intrinsic, string instruction> {
6051  foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
6052    defvar fvti = fvtiToFWti.Vti;
6053    defvar fwti = fvtiToFWti.Wti;
6054    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
6055                                 GetVTypePredicates<fwti>.Predicates) in
6056    defm : VPatConversionTARoundingMode<intrinsic, instruction, "W",
6057                                        fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
6058                                        fvti.LMul, fvti.RegClass, fwti.RegClass>;
6059  }
6060}
6061
6062multiclass VPatCompare_VI<string intrinsic, string inst,
6063                          ImmLeaf ImmType> {
6064  foreach vti = AllIntegerVectors in {
6065    defvar Intr = !cast<Intrinsic>(intrinsic);
6066    defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
6067    let Predicates = GetVTypePredicates<vti>.Predicates in
6068    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
6069                              (vti.Scalar ImmType:$rs2),
6070                              VLOpFrag)),
6071              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
6072                      GPR:$vl, vti.Log2SEW)>;
6073    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
6074    defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
6075    let Predicates = GetVTypePredicates<vti>.Predicates in
6076    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
6077                                  (vti.Vector vti.RegClass:$rs1),
6078                                  (vti.Scalar ImmType:$rs2),
6079                                  (vti.Mask V0),
6080                                  VLOpFrag)),
6081              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
6082                          (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
6083  }
6084}
6085
6086//===----------------------------------------------------------------------===//
6087// Pseudo instructions
6088//===----------------------------------------------------------------------===//
6089
6090let Predicates = [HasVInstructions] in {
6091
6092//===----------------------------------------------------------------------===//
6093// Pseudo Instructions for CodeGen
6094//===----------------------------------------------------------------------===//
6095
6096let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
6097  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
6098                               [(set GPR:$rd, (riscv_read_vlenb))]>,
6099                        PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVLENB.Encoding, X0)>,
6100                        Sched<[WriteRdVLENB]>;
6101}
6102
6103let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
6104    Uses = [VL] in
6105def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>,
6106                   PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVL.Encoding, X0)>;
6107
6108foreach lmul = MxList in {
6109  foreach nf = NFSet<lmul>.L in {
6110    defvar vreg = SegRegClass<lmul, nf>.RC;
6111    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
6112        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
6113      def "PseudoVSPILL" # nf # "_" # lmul.MX :
6114        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2), []>;
6115    }
6116    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
6117        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
6118      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
6119        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2), []>;
6120    }
6121  }
6122}
6123
6124/// Empty pseudo for RISCVInitUndefPass
6125let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 0,
6126    isCodeGenOnly = 1 in {
6127  def PseudoRVVInitUndefM1 : Pseudo<(outs VR:$vd), (ins), [], "">;
6128  def PseudoRVVInitUndefM2 : Pseudo<(outs VRM2:$vd), (ins), [], "">;
6129  def PseudoRVVInitUndefM4 : Pseudo<(outs VRM4:$vd), (ins), [], "">;
6130  def PseudoRVVInitUndefM8 : Pseudo<(outs VRM8:$vd), (ins), [], "">;
6131}
6132
6133//===----------------------------------------------------------------------===//
6134// 6. Configuration-Setting Instructions
6135//===----------------------------------------------------------------------===//
6136
6137// Pseudos.
6138let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
6139// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
6140// the when we aren't using one of the special X0 encodings. Otherwise it could
6141// be accidentally be made X0 by MachineIR optimizations. To satisfy the
6142// verifier, we also need a GPRX0 instruction for the special encodings.
6143def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>,
6144                    Sched<[WriteVSETVLI, ReadVSETVLI]>;
6145def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>,
6146                      Sched<[WriteVSETVLI, ReadVSETVLI]>;
6147def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>,
6148                     Sched<[WriteVSETIVLI]>;
6149}
6150
6151//===----------------------------------------------------------------------===//
6152// 7. Vector Loads and Stores
6153//===----------------------------------------------------------------------===//
6154
6155//===----------------------------------------------------------------------===//
6156// 7.4 Vector Unit-Stride Instructions
6157//===----------------------------------------------------------------------===//
6158
6159// Pseudos Unit-Stride Loads and Stores
6160defm PseudoVL : VPseudoUSLoad;
6161defm PseudoVS : VPseudoUSStore;
6162
6163defm PseudoVLM : VPseudoLoadMask;
6164defm PseudoVSM : VPseudoStoreMask;
6165
6166//===----------------------------------------------------------------------===//
6167// 7.5 Vector Strided Instructions
6168//===----------------------------------------------------------------------===//
6169
6170// Vector Strided Loads and Stores
6171defm PseudoVLS : VPseudoSLoad;
6172defm PseudoVSS : VPseudoSStore;
6173
6174//===----------------------------------------------------------------------===//
6175// 7.6 Vector Indexed Instructions
6176//===----------------------------------------------------------------------===//
6177
6178// Vector Indexed Loads and Stores
6179defm PseudoVLUX : VPseudoILoad<Ordered=false>;
6180defm PseudoVLOX : VPseudoILoad<Ordered=true>;
6181defm PseudoVSOX : VPseudoIStore<Ordered=true>;
6182defm PseudoVSUX : VPseudoIStore<Ordered=false>;
6183
6184//===----------------------------------------------------------------------===//
6185// 7.7. Unit-stride Fault-Only-First Loads
6186//===----------------------------------------------------------------------===//
6187
6188// vleff may update VL register
6189let hasSideEffects = 1, Defs = [VL] in
6190defm PseudoVL : VPseudoFFLoad;
6191
6192//===----------------------------------------------------------------------===//
6193// 7.8. Vector Load/Store Segment Instructions
6194//===----------------------------------------------------------------------===//
6195defm PseudoVLSEG : VPseudoUSSegLoad;
6196defm PseudoVLSSEG : VPseudoSSegLoad;
6197defm PseudoVLOXSEG : VPseudoISegLoad<Ordered=true>;
6198defm PseudoVLUXSEG : VPseudoISegLoad<Ordered=false>;
6199defm PseudoVSSEG : VPseudoUSSegStore;
6200defm PseudoVSSSEG : VPseudoSSegStore;
6201defm PseudoVSOXSEG : VPseudoISegStore<Ordered=true>;
6202defm PseudoVSUXSEG : VPseudoISegStore<Ordered=false>;
6203
6204// vlseg<nf>e<eew>ff.v may update VL register
6205let hasSideEffects = 1, Defs = [VL] in {
6206defm PseudoVLSEG : VPseudoUSSegLoadFF;
6207}
6208
6209//===----------------------------------------------------------------------===//
6210// 11. Vector Integer Arithmetic Instructions
6211//===----------------------------------------------------------------------===//
6212
6213//===----------------------------------------------------------------------===//
6214// 11.1. Vector Single-Width Integer Add and Subtract
6215//===----------------------------------------------------------------------===//
6216defm PseudoVADD   : VPseudoVALU_VV_VX_VI;
6217defm PseudoVSUB   : VPseudoVALU_VV_VX;
6218defm PseudoVRSUB  : VPseudoVALU_VX_VI;
6219
6220foreach vti = AllIntegerVectors in {
6221  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
6222  // Occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
6223  // to use a more complex splat sequence. Add the pattern for all VTs for
6224  // consistency.
6225  let Predicates = GetVTypePredicates<vti>.Predicates in {
6226    def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
6227                                           (vti.Vector vti.RegClass:$rs2),
6228                                           (vti.Vector vti.RegClass:$rs1),
6229                                           VLOpFrag)),
6230              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX)
6231                                                        vti.RegClass:$merge,
6232                                                        vti.RegClass:$rs1,
6233                                                        vti.RegClass:$rs2,
6234                                                        GPR:$vl,
6235                                                        vti.Log2SEW, TU_MU)>;
6236    def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
6237                                                (vti.Vector vti.RegClass:$rs2),
6238                                                (vti.Vector vti.RegClass:$rs1),
6239                                                (vti.Mask V0),
6240                                                VLOpFrag,
6241                                                (XLenVT timm:$policy))),
6242              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
6243                                                        vti.RegClass:$merge,
6244                                                        vti.RegClass:$rs1,
6245                                                        vti.RegClass:$rs2,
6246                                                        (vti.Mask V0),
6247                                                        GPR:$vl,
6248                                                        vti.Log2SEW,
6249                                                        (XLenVT timm:$policy))>;
6250
6251    // Match VSUB with a small immediate to vadd.vi by negating the immediate.
6252    def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
6253                                          (vti.Vector vti.RegClass:$rs1),
6254                                          (vti.Scalar simm5_plus1:$rs2),
6255                                          VLOpFrag)),
6256              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)),
6257                                                                vti.RegClass:$rs1,
6258                                                                (NegImm simm5_plus1:$rs2),
6259                                                                GPR:$vl,
6260                                                                vti.Log2SEW, TA_MA)>;
6261    def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
6262                                               (vti.Vector vti.RegClass:$rs1),
6263                                               (vti.Scalar simm5_plus1:$rs2),
6264                                               (vti.Mask V0),
6265                                               VLOpFrag,
6266                                               (XLenVT timm:$policy))),
6267              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
6268                                                        vti.RegClass:$merge,
6269                                                        vti.RegClass:$rs1,
6270                                                        (NegImm simm5_plus1:$rs2),
6271                                                        (vti.Mask V0),
6272                                                        GPR:$vl,
6273                                                        vti.Log2SEW,
6274                                                        (XLenVT timm:$policy))>;
6275  }
6276}
6277
6278//===----------------------------------------------------------------------===//
6279// 11.2. Vector Widening Integer Add/Subtract
6280//===----------------------------------------------------------------------===//
6281defm PseudoVWADDU : VPseudoVWALU_VV_VX;
6282defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
6283defm PseudoVWADD  : VPseudoVWALU_VV_VX;
6284defm PseudoVWSUB  : VPseudoVWALU_VV_VX;
6285defm PseudoVWADDU : VPseudoVWALU_WV_WX;
6286defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
6287defm PseudoVWADD  : VPseudoVWALU_WV_WX;
6288defm PseudoVWSUB  : VPseudoVWALU_WV_WX;
6289
6290//===----------------------------------------------------------------------===//
6291// 11.3. Vector Integer Extension
6292//===----------------------------------------------------------------------===//
6293defm PseudoVZEXT_VF2 : PseudoVEXT_VF2;
6294defm PseudoVZEXT_VF4 : PseudoVEXT_VF4;
6295defm PseudoVZEXT_VF8 : PseudoVEXT_VF8;
6296defm PseudoVSEXT_VF2 : PseudoVEXT_VF2;
6297defm PseudoVSEXT_VF4 : PseudoVEXT_VF4;
6298defm PseudoVSEXT_VF8 : PseudoVEXT_VF8;
6299
6300//===----------------------------------------------------------------------===//
6301// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
6302//===----------------------------------------------------------------------===//
6303defm PseudoVADC  : VPseudoVCALU_VM_XM_IM;
6304defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd">;
6305defm PseudoVMADC : VPseudoVCALUM_V_X_I<"@earlyclobber $rd">;
6306
6307defm PseudoVSBC  : VPseudoVCALU_VM_XM;
6308defm PseudoVMSBC : VPseudoVCALUM_VM_XM<"@earlyclobber $rd">;
6309defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd">;
6310
6311//===----------------------------------------------------------------------===//
6312// 11.5. Vector Bitwise Logical Instructions
6313//===----------------------------------------------------------------------===//
6314defm PseudoVAND : VPseudoVALU_VV_VX_VI;
6315defm PseudoVOR  : VPseudoVALU_VV_VX_VI;
6316defm PseudoVXOR : VPseudoVALU_VV_VX_VI;
6317
6318//===----------------------------------------------------------------------===//
6319// 11.6. Vector Single-Width Bit Shift Instructions
6320//===----------------------------------------------------------------------===//
6321defm PseudoVSLL : VPseudoVSHT_VV_VX_VI<uimm5>;
6322defm PseudoVSRL : VPseudoVSHT_VV_VX_VI<uimm5>;
6323defm PseudoVSRA : VPseudoVSHT_VV_VX_VI<uimm5>;
6324
6325//===----------------------------------------------------------------------===//
6326// 11.7. Vector Narrowing Integer Right Shift Instructions
6327//===----------------------------------------------------------------------===//
6328defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI;
6329defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;
6330
6331//===----------------------------------------------------------------------===//
6332// 11.8. Vector Integer Comparison Instructions
6333//===----------------------------------------------------------------------===//
6334defm PseudoVMSEQ  : VPseudoVCMPM_VV_VX_VI;
6335defm PseudoVMSNE  : VPseudoVCMPM_VV_VX_VI;
6336defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
6337defm PseudoVMSLT  : VPseudoVCMPM_VV_VX;
6338defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
6339defm PseudoVMSLE  : VPseudoVCMPM_VV_VX_VI;
6340defm PseudoVMSGTU : VPseudoVCMPM_VX_VI;
6341defm PseudoVMSGT  : VPseudoVCMPM_VX_VI;
6342
6343//===----------------------------------------------------------------------===//
6344// 11.9. Vector Integer Min/Max Instructions
6345//===----------------------------------------------------------------------===//
6346defm PseudoVMINU : VPseudoVMINMAX_VV_VX;
6347defm PseudoVMIN  : VPseudoVMINMAX_VV_VX;
6348defm PseudoVMAXU : VPseudoVMINMAX_VV_VX;
6349defm PseudoVMAX  : VPseudoVMINMAX_VV_VX;
6350
6351//===----------------------------------------------------------------------===//
6352// 11.10. Vector Single-Width Integer Multiply Instructions
6353//===----------------------------------------------------------------------===//
6354defm PseudoVMUL    : VPseudoVMUL_VV_VX;
6355defm PseudoVMULH   : VPseudoVMUL_VV_VX;
6356defm PseudoVMULHU  : VPseudoVMUL_VV_VX;
6357defm PseudoVMULHSU : VPseudoVMUL_VV_VX;
6358
6359//===----------------------------------------------------------------------===//
6360// 11.11. Vector Integer Divide Instructions
6361//===----------------------------------------------------------------------===//
6362defm PseudoVDIVU : VPseudoVDIV_VV_VX;
6363defm PseudoVDIV  : VPseudoVDIV_VV_VX;
6364defm PseudoVREMU : VPseudoVDIV_VV_VX;
6365defm PseudoVREM  : VPseudoVDIV_VV_VX;
6366
6367//===----------------------------------------------------------------------===//
6368// 11.12. Vector Widening Integer Multiply Instructions
6369//===----------------------------------------------------------------------===//
6370defm PseudoVWMUL   : VPseudoVWMUL_VV_VX;
6371defm PseudoVWMULU  : VPseudoVWMUL_VV_VX;
6372defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;
6373
6374//===----------------------------------------------------------------------===//
6375// 11.13. Vector Single-Width Integer Multiply-Add Instructions
6376//===----------------------------------------------------------------------===//
6377defm PseudoVMACC  : VPseudoVMAC_VV_VX_AAXA;
6378defm PseudoVNMSAC : VPseudoVMAC_VV_VX_AAXA;
6379defm PseudoVMADD  : VPseudoVMAC_VV_VX_AAXA;
6380defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;
6381
6382//===----------------------------------------------------------------------===//
6383// 11.14. Vector Widening Integer Multiply-Add Instructions
6384//===----------------------------------------------------------------------===//
6385defm PseudoVWMACCU  : VPseudoVWMAC_VV_VX;
6386defm PseudoVWMACC   : VPseudoVWMAC_VV_VX;
6387defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
6388defm PseudoVWMACCUS : VPseudoVWMAC_VX;
6389
6390//===----------------------------------------------------------------------===//
6391// 11.15. Vector Integer Merge Instructions
6392//===----------------------------------------------------------------------===//
6393defm PseudoVMERGE : VPseudoVMRG_VM_XM_IM;
6394
6395//===----------------------------------------------------------------------===//
6396// 11.16. Vector Integer Move Instructions
6397//===----------------------------------------------------------------------===//
6398defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;
6399
6400//===----------------------------------------------------------------------===//
6401// 12. Vector Fixed-Point Arithmetic Instructions
6402//===----------------------------------------------------------------------===//
6403
6404//===----------------------------------------------------------------------===//
6405// 12.1. Vector Single-Width Saturating Add and Subtract
6406//===----------------------------------------------------------------------===//
6407let Defs = [VXSAT], hasSideEffects = 1 in {
6408  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI;
6409  defm PseudoVSADD  : VPseudoVSALU_VV_VX_VI;
6410  defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
6411  defm PseudoVSSUB  : VPseudoVSALU_VV_VX;
6412}
6413
6414//===----------------------------------------------------------------------===//
6415// 12.2. Vector Single-Width Averaging Add and Subtract
6416//===----------------------------------------------------------------------===//
6417defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM;
6418defm PseudoVAADD  : VPseudoVAALU_VV_VX_RM;
6419defm PseudoVASUBU : VPseudoVAALU_VV_VX_RM;
6420defm PseudoVASUB  : VPseudoVAALU_VV_VX_RM;
6421
6422//===----------------------------------------------------------------------===//
6423// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
6424//===----------------------------------------------------------------------===//
6425let Defs = [VXSAT], hasSideEffects = 1 in {
6426  defm PseudoVSMUL : VPseudoVSMUL_VV_VX_RM;
6427}
6428
6429//===----------------------------------------------------------------------===//
6430// 12.4. Vector Single-Width Scaling Shift Instructions
6431//===----------------------------------------------------------------------===//
6432defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
6433defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
6434
6435//===----------------------------------------------------------------------===//
6436// 12.5. Vector Narrowing Fixed-Point Clip Instructions
6437//===----------------------------------------------------------------------===//
6438let Defs = [VXSAT], hasSideEffects = 1 in {
6439  defm PseudoVNCLIP  : VPseudoVNCLP_WV_WX_WI_RM;
6440  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI_RM;
6441}
6442
6443} // Predicates = [HasVInstructions]
6444
6445//===----------------------------------------------------------------------===//
6446// 13. Vector Floating-Point Instructions
6447//===----------------------------------------------------------------------===//
6448
6449let Predicates = [HasVInstructionsAnyF] in {
6450//===----------------------------------------------------------------------===//
6451// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
6452//===----------------------------------------------------------------------===//
6453let mayRaiseFPException = true, hasPostISelHook = 1 in {
6454defm PseudoVFADD  : VPseudoVALU_VV_VF_RM;
6455defm PseudoVFSUB  : VPseudoVALU_VV_VF_RM;
6456defm PseudoVFRSUB : VPseudoVALU_VF_RM;
6457}
6458
6459//===----------------------------------------------------------------------===//
6460// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
6461//===----------------------------------------------------------------------===//
6462let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
6463defm PseudoVFWADD : VPseudoVFWALU_VV_VF_RM;
6464defm PseudoVFWSUB : VPseudoVFWALU_VV_VF_RM;
6465defm PseudoVFWADD : VPseudoVFWALU_WV_WF_RM;
6466defm PseudoVFWSUB : VPseudoVFWALU_WV_WF_RM;
6467}
6468
6469//===----------------------------------------------------------------------===//
6470// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
6471//===----------------------------------------------------------------------===//
6472let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
6473defm PseudoVFMUL  : VPseudoVFMUL_VV_VF_RM;
6474defm PseudoVFDIV  : VPseudoVFDIV_VV_VF_RM;
6475defm PseudoVFRDIV : VPseudoVFRDIV_VF_RM;
6476}
6477
6478//===----------------------------------------------------------------------===//
6479// 13.5. Vector Widening Floating-Point Multiply
6480//===----------------------------------------------------------------------===//
6481let mayRaiseFPException = true, hasSideEffects = 0 in {
6482defm PseudoVFWMUL : VPseudoVWMUL_VV_VF_RM;
6483}
6484
6485//===----------------------------------------------------------------------===//
6486// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
6487//===----------------------------------------------------------------------===//
6488let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
6489defm PseudoVFMACC  : VPseudoVMAC_VV_VF_AAXA_RM;
6490defm PseudoVFNMACC : VPseudoVMAC_VV_VF_AAXA_RM;
6491defm PseudoVFMSAC  : VPseudoVMAC_VV_VF_AAXA_RM;
6492defm PseudoVFNMSAC : VPseudoVMAC_VV_VF_AAXA_RM;
6493defm PseudoVFMADD  : VPseudoVMAC_VV_VF_AAXA_RM;
6494defm PseudoVFNMADD : VPseudoVMAC_VV_VF_AAXA_RM;
6495defm PseudoVFMSUB  : VPseudoVMAC_VV_VF_AAXA_RM;
6496defm PseudoVFNMSUB : VPseudoVMAC_VV_VF_AAXA_RM;
6497}
6498
6499//===----------------------------------------------------------------------===//
6500// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
6501//===----------------------------------------------------------------------===//
6502let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
6503defm PseudoVFWMACC  : VPseudoVWMAC_VV_VF_RM;
6504defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF_RM;
6505defm PseudoVFWMSAC  : VPseudoVWMAC_VV_VF_RM;
6506defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF_RM;
6507let Predicates = [HasStdExtZvfbfwma] in
6508defm PseudoVFWMACCBF16  : VPseudoVWMAC_VV_VF_BF_RM;
6509}
6510
6511//===----------------------------------------------------------------------===//
6512// 13.8. Vector Floating-Point Square-Root Instruction
6513//===----------------------------------------------------------------------===//
6514let mayRaiseFPException = true, hasSideEffects = 0 in
6515defm PseudoVFSQRT : VPseudoVSQR_V_RM;
6516
6517//===----------------------------------------------------------------------===//
6518// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
6519//===----------------------------------------------------------------------===//
6520let mayRaiseFPException = true in
6521defm PseudoVFRSQRT7 : VPseudoVRCP_V;
6522
6523//===----------------------------------------------------------------------===//
6524// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
6525//===----------------------------------------------------------------------===//
6526let mayRaiseFPException = true, hasSideEffects = 0 in
6527defm PseudoVFREC7 : VPseudoVRCP_V_RM;
6528
6529//===----------------------------------------------------------------------===//
6530// 13.11. Vector Floating-Point Min/Max Instructions
6531//===----------------------------------------------------------------------===//
6532let mayRaiseFPException = true in {
6533defm PseudoVFMIN : VPseudoVMAX_VV_VF;
6534defm PseudoVFMAX : VPseudoVMAX_VV_VF;
6535}
6536
6537//===----------------------------------------------------------------------===//
6538// 13.12. Vector Floating-Point Sign-Injection Instructions
6539//===----------------------------------------------------------------------===//
6540defm PseudoVFSGNJ  : VPseudoVSGNJ_VV_VF;
6541defm PseudoVFSGNJN : VPseudoVSGNJ_VV_VF;
6542defm PseudoVFSGNJX : VPseudoVSGNJ_VV_VF;
6543
6544//===----------------------------------------------------------------------===//
6545// 13.13. Vector Floating-Point Compare Instructions
6546//===----------------------------------------------------------------------===//
6547let mayRaiseFPException = true in {
6548defm PseudoVMFEQ : VPseudoVCMPM_VV_VF;
6549defm PseudoVMFNE : VPseudoVCMPM_VV_VF;
6550defm PseudoVMFLT : VPseudoVCMPM_VV_VF;
6551defm PseudoVMFLE : VPseudoVCMPM_VV_VF;
6552defm PseudoVMFGT : VPseudoVCMPM_VF;
6553defm PseudoVMFGE : VPseudoVCMPM_VF;
6554}
6555
6556//===----------------------------------------------------------------------===//
6557// 13.14. Vector Floating-Point Classify Instruction
6558//===----------------------------------------------------------------------===//
6559defm PseudoVFCLASS : VPseudoVCLS_V;
6560
6561//===----------------------------------------------------------------------===//
6562// 13.15. Vector Floating-Point Merge Instruction
6563//===----------------------------------------------------------------------===//
6564defm PseudoVFMERGE : VPseudoVMRG_FM;
6565
6566//===----------------------------------------------------------------------===//
6567// 13.16. Vector Floating-Point Move Instruction
6568//===----------------------------------------------------------------------===//
6569defm PseudoVFMV_V : VPseudoVMV_F;
6570
6571//===----------------------------------------------------------------------===//
6572// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
6573//===----------------------------------------------------------------------===//
6574let mayRaiseFPException = true in {
6575let hasSideEffects = 0, hasPostISelHook = 1 in {
6576defm PseudoVFCVT_XU_F : VPseudoVCVTI_V_RM;
6577defm PseudoVFCVT_X_F : VPseudoVCVTI_V_RM;
6578}
6579
6580defm PseudoVFCVT_RM_XU_F : VPseudoVCVTI_RM_V;
6581defm PseudoVFCVT_RM_X_F : VPseudoVCVTI_RM_V;
6582
6583defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
6584defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;
6585
6586defm PseudoVFROUND_NOEXCEPT : VPseudoVFROUND_NOEXCEPT_V;
6587let hasSideEffects = 0, hasPostISelHook = 1 in {
6588defm PseudoVFCVT_F_XU : VPseudoVCVTF_V_RM;
6589defm PseudoVFCVT_F_X : VPseudoVCVTF_V_RM;
6590}
6591defm PseudoVFCVT_RM_F_XU : VPseudoVCVTF_RM_V;
6592defm PseudoVFCVT_RM_F_X  : VPseudoVCVTF_RM_V;
6593} // mayRaiseFPException = true
6594
6595//===----------------------------------------------------------------------===//
6596// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
6597//===----------------------------------------------------------------------===//
6598let mayRaiseFPException = true in {
6599let hasSideEffects = 0, hasPostISelHook = 1 in {
6600defm PseudoVFWCVT_XU_F     : VPseudoVWCVTI_V_RM;
6601defm PseudoVFWCVT_X_F      : VPseudoVWCVTI_V_RM;
6602}
6603defm PseudoVFWCVT_RM_XU_F  : VPseudoVWCVTI_RM_V;
6604defm PseudoVFWCVT_RM_X_F   : VPseudoVWCVTI_RM_V;
6605
6606defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
6607defm PseudoVFWCVT_RTZ_X_F  : VPseudoVWCVTI_V;
6608
6609defm PseudoVFWCVT_F_XU     : VPseudoVWCVTF_V;
6610defm PseudoVFWCVT_F_X      : VPseudoVWCVTF_V;
6611
6612defm PseudoVFWCVT_F_F      : VPseudoVWCVTD_V;
6613defm PseudoVFWCVTBF16_F_F :  VPseudoVWCVTD_V;
6614} // mayRaiseFPException = true
6615
6616//===----------------------------------------------------------------------===//
6617// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
6618//===----------------------------------------------------------------------===//
6619let mayRaiseFPException = true in {
6620let hasSideEffects = 0, hasPostISelHook = 1 in {
6621defm PseudoVFNCVT_XU_F     : VPseudoVNCVTI_W_RM;
6622defm PseudoVFNCVT_X_F      : VPseudoVNCVTI_W_RM;
6623}
6624defm PseudoVFNCVT_RM_XU_F  : VPseudoVNCVTI_RM_W;
6625defm PseudoVFNCVT_RM_X_F   : VPseudoVNCVTI_RM_W;
6626
6627defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
6628defm PseudoVFNCVT_RTZ_X_F  : VPseudoVNCVTI_W;
6629
6630let hasSideEffects = 0, hasPostISelHook = 1 in {
6631defm PseudoVFNCVT_F_XU     : VPseudoVNCVTF_W_RM;
6632defm PseudoVFNCVT_F_X      : VPseudoVNCVTF_W_RM;
6633}
6634defm PseudoVFNCVT_RM_F_XU  : VPseudoVNCVTF_RM_W;
6635defm PseudoVFNCVT_RM_F_X   : VPseudoVNCVTF_RM_W;
6636
6637let hasSideEffects = 0, hasPostISelHook = 1 in
6638defm PseudoVFNCVT_F_F      : VPseudoVNCVTD_W_RM;
6639defm PseudoVFNCVTBF16_F_F :  VPseudoVNCVTD_W_RM;
6640
6641defm PseudoVFNCVT_ROD_F_F  : VPseudoVNCVTD_W;
6642} // mayRaiseFPException = true
6643} // Predicates = [HasVInstructionsAnyF]
6644
6645//===----------------------------------------------------------------------===//
6646// 14. Vector Reduction Operations
6647//===----------------------------------------------------------------------===//
6648
6649let Predicates = [HasVInstructions] in {
6650//===----------------------------------------------------------------------===//
6651// 14.1. Vector Single-Width Integer Reduction Instructions
6652//===----------------------------------------------------------------------===//
6653defm PseudoVREDSUM  : VPseudoVRED_VS;
6654defm PseudoVREDAND  : VPseudoVRED_VS;
6655defm PseudoVREDOR   : VPseudoVRED_VS;
6656defm PseudoVREDXOR  : VPseudoVRED_VS;
6657defm PseudoVREDMINU : VPseudoVREDMINMAX_VS;
6658defm PseudoVREDMIN  : VPseudoVREDMINMAX_VS;
6659defm PseudoVREDMAXU : VPseudoVREDMINMAX_VS;
6660defm PseudoVREDMAX  : VPseudoVREDMINMAX_VS;
6661
6662//===----------------------------------------------------------------------===//
6663// 14.2. Vector Widening Integer Reduction Instructions
6664//===----------------------------------------------------------------------===//
6665let IsRVVWideningReduction = 1 in {
6666defm PseudoVWREDSUMU   : VPseudoVWRED_VS;
6667defm PseudoVWREDSUM    : VPseudoVWRED_VS;
6668}
6669} // Predicates = [HasVInstructions]
6670
6671let Predicates = [HasVInstructionsAnyF] in {
6672//===----------------------------------------------------------------------===//
6673// 14.3. Vector Single-Width Floating-Point Reduction Instructions
6674//===----------------------------------------------------------------------===//
6675let mayRaiseFPException = true,
6676    hasSideEffects = 0 in {
6677defm PseudoVFREDOSUM : VPseudoVFREDO_VS_RM;
6678defm PseudoVFREDUSUM : VPseudoVFRED_VS_RM;
6679}
6680let mayRaiseFPException = true in {
6681defm PseudoVFREDMIN  : VPseudoVFREDMINMAX_VS;
6682defm PseudoVFREDMAX  : VPseudoVFREDMINMAX_VS;
6683}
6684
6685//===----------------------------------------------------------------------===//
6686// 14.4. Vector Widening Floating-Point Reduction Instructions
6687//===----------------------------------------------------------------------===//
6688let IsRVVWideningReduction = 1,
6689    hasSideEffects = 0,
6690    mayRaiseFPException = true in {
6691defm PseudoVFWREDUSUM  : VPseudoVFWRED_VS_RM;
6692defm PseudoVFWREDOSUM  : VPseudoVFWREDO_VS_RM;
6693}
6694
6695} // Predicates = [HasVInstructionsAnyF]
6696
6697//===----------------------------------------------------------------------===//
6698// 15. Vector Mask Instructions
6699//===----------------------------------------------------------------------===//
6700
6701//===----------------------------------------------------------------------===//
6702// 15.1 Vector Mask-Register Logical Instructions
6703//===----------------------------------------------------------------------===//
6704
6705defm PseudoVMAND: VPseudoVALU_MM<Commutable=1>;
6706defm PseudoVMNAND: VPseudoVALU_MM<Commutable=1>;
6707defm PseudoVMANDN: VPseudoVALU_MM;
6708defm PseudoVMXOR: VPseudoVALU_MM<Commutable=1>;
6709defm PseudoVMOR: VPseudoVALU_MM<Commutable=1>;
6710defm PseudoVMNOR: VPseudoVALU_MM<Commutable=1>;
6711defm PseudoVMORN: VPseudoVALU_MM;
6712defm PseudoVMXNOR: VPseudoVALU_MM<Commutable=1>;
6713
6714// Pseudo instructions
6715defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
6716defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
6717
6718//===----------------------------------------------------------------------===//
6719// 15.2. Vector mask population count vcpop
6720//===----------------------------------------------------------------------===//
6721
6722let IsSignExtendingOpW = 1 in
6723defm PseudoVCPOP: VPseudoVPOP_M;
6724
6725//===----------------------------------------------------------------------===//
6726// 15.3. vfirst find-first-set mask bit
6727//===----------------------------------------------------------------------===//
6728
6729let IsSignExtendingOpW = 1 in
6730defm PseudoVFIRST: VPseudoV1ST_M;
6731
6732//===----------------------------------------------------------------------===//
6733// 15.4. vmsbf.m set-before-first mask bit
6734//===----------------------------------------------------------------------===//
6735defm PseudoVMSBF: VPseudoVSFS_M;
6736
6737//===----------------------------------------------------------------------===//
6738// 15.5. vmsif.m set-including-first mask bit
6739//===----------------------------------------------------------------------===//
6740defm PseudoVMSIF: VPseudoVSFS_M;
6741
6742//===----------------------------------------------------------------------===//
6743// 15.6. vmsof.m set-only-first mask bit
6744//===----------------------------------------------------------------------===//
6745defm PseudoVMSOF: VPseudoVSFS_M;
6746
6747//===----------------------------------------------------------------------===//
6748// 15.8.  Vector Iota Instruction
6749//===----------------------------------------------------------------------===//
6750defm PseudoVIOTA_M: VPseudoVIOT_M;
6751
6752//===----------------------------------------------------------------------===//
6753// 15.9. Vector Element Index Instruction
6754//===----------------------------------------------------------------------===//
6755defm PseudoVID : VPseudoVID_V;
6756
6757//===----------------------------------------------------------------------===//
6758// 16. Vector Permutation Instructions
6759//===----------------------------------------------------------------------===//
6760
6761//===----------------------------------------------------------------------===//
6762// 16.1. Integer Scalar Move Instructions
6763//===----------------------------------------------------------------------===//
6764
6765let Predicates = [HasVInstructions] in {
6766let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
6767  let HasSEWOp = 1, BaseInstr = VMV_X_S in
6768  def PseudoVMV_X_S:
6769    Pseudo<(outs GPR:$rd), (ins VR:$rs2, ixlenimm:$sew), []>,
6770    Sched<[WriteVIMovVX, ReadVIMovVX]>,
6771    RISCVVPseudo;
6772  let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
6773      Constraints = "$rd = $rs1" in
6774  def PseudoVMV_S_X: Pseudo<(outs VR:$rd),
6775                            (ins VR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),
6776                            []>,
6777    Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>,
6778    RISCVVPseudo;
6779}
6780} // Predicates = [HasVInstructions]
6781
6782//===----------------------------------------------------------------------===//
6783// 16.2. Floating-Point Scalar Move Instructions
6784//===----------------------------------------------------------------------===//
6785
6786let Predicates = [HasVInstructionsAnyF] in {
6787let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
6788  foreach f = FPList in {
6789    foreach m = f.MxList in {
6790      defvar mx = m.MX;
6791      let VLMul = m.value in {
6792        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
6793        def "PseudoVFMV_" # f.FX # "_S_" # mx :
6794          Pseudo<(outs f.fprclass:$rd),
6795                 (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
6796          Sched<[WriteVFMovVF, ReadVFMovVF]>,
6797          RISCVVPseudo;
6798        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
6799            Constraints = "$rd = $rs1" in
6800        def "PseudoVFMV_S_" # f.FX # "_" # mx :
6801                                          Pseudo<(outs m.vrclass:$rd),
6802                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
6803                                                      AVL:$vl, ixlenimm:$sew),
6804                                                 []>,
6805          Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>,
6806          RISCVVPseudo;
6807      }
6808    }
6809  }
6810}
6811} // Predicates = [HasVInstructionsAnyF]
6812
6813//===----------------------------------------------------------------------===//
6814// 16.3. Vector Slide Instructions
6815//===----------------------------------------------------------------------===//
6816let Predicates = [HasVInstructions] in {
6817  defm PseudoVSLIDEUP    : VPseudoVSLD_VX_VI<uimm5, "@earlyclobber $rd">;
6818  defm PseudoVSLIDEDOWN  : VPseudoVSLD_VX_VI<uimm5>;
6819  defm PseudoVSLIDE1UP   : VPseudoVSLD1_VX<"@earlyclobber $rd">;
6820  defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
6821} // Predicates = [HasVInstructions]
6822
6823let Predicates = [HasVInstructionsAnyF] in {
6824  defm PseudoVFSLIDE1UP  : VPseudoVSLD1_VF<"@earlyclobber $rd">;
6825  defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
6826} // Predicates = [HasVInstructionsAnyF]
6827
6828//===----------------------------------------------------------------------===//
6829// 16.4. Vector Register Gather Instructions
6830//===----------------------------------------------------------------------===//
6831defm PseudoVRGATHER     : VPseudoVGTR_VV_VX_VI<uimm5, "@earlyclobber $rd">;
6832defm PseudoVRGATHEREI16 : VPseudoVGTR_VV_EEW<eew=16,
6833                                             Constraint="@earlyclobber $rd">;
6834
6835//===----------------------------------------------------------------------===//
6836// 16.5. Vector Compress Instruction
6837//===----------------------------------------------------------------------===//
6838defm PseudoVCOMPRESS : VPseudoVCPR_V;
6839
6840//===----------------------------------------------------------------------===//
6841// Patterns.
6842//===----------------------------------------------------------------------===//
6843
6844//===----------------------------------------------------------------------===//
6845// 11. Vector Integer Arithmetic Instructions
6846//===----------------------------------------------------------------------===//
6847
6848//===----------------------------------------------------------------------===//
6849// 11.1. Vector Single-Width Integer Add and Subtract
6850//===----------------------------------------------------------------------===//
6851defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
6852defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
6853defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
6854
6855//===----------------------------------------------------------------------===//
6856// 11.2. Vector Widening Integer Add/Subtract
6857//===----------------------------------------------------------------------===//
6858defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
6859defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
6860defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
6861defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
6862defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
6863defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
6864defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
6865defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;
6866
6867//===----------------------------------------------------------------------===//
6868// 11.3. Vector Integer Extension
6869//===----------------------------------------------------------------------===//
6870defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
6871                     AllFractionableVF2IntVectors>;
6872defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
6873                     AllFractionableVF4IntVectors>;
6874defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
6875                     AllFractionableVF8IntVectors>;
6876defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
6877                     AllFractionableVF2IntVectors>;
6878defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
6879                     AllFractionableVF4IntVectors>;
6880defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
6881                     AllFractionableVF8IntVectors>;
6882
6883//===----------------------------------------------------------------------===//
6884// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
6885//===----------------------------------------------------------------------===//
6886defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
6887defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
6888defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;
6889
6890defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
6891defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
6892defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
6893
6894//===----------------------------------------------------------------------===//
6895// 11.5. Vector Bitwise Logical Instructions
6896//===----------------------------------------------------------------------===//
6897defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
6898defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
6899defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;
6900
6901//===----------------------------------------------------------------------===//
6902// 11.6. Vector Single-Width Bit Shift Instructions
6903//===----------------------------------------------------------------------===//
6904defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
6905                            uimm5>;
6906defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
6907                            uimm5>;
6908defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
6909                            uimm5>;
6910
6911foreach vti = AllIntegerVectors in {
6912  // Emit shift by 1 as an add since it might be faster.
6913  let Predicates = GetVTypePredicates<vti>.Predicates in {
6914    def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef),
6915                                          (vti.Vector vti.RegClass:$rs1),
6916                                          (XLenVT 1), VLOpFrag)),
6917              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX)
6918                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
6919                 vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
6920    def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
6921                                               (vti.Vector vti.RegClass:$rs1),
6922                                               (XLenVT 1),
6923                                               (vti.Mask V0),
6924                                               VLOpFrag,
6925                                               (XLenVT timm:$policy))),
6926              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
6927                                                          vti.RegClass:$merge,
6928                                                          vti.RegClass:$rs1,
6929                                                          vti.RegClass:$rs1,
6930                                                          (vti.Mask V0),
6931                                                          GPR:$vl,
6932                                                          vti.Log2SEW,
6933                                                          (XLenVT timm:$policy))>;
6934  }
6935}
6936
6937//===----------------------------------------------------------------------===//
6938// 11.7. Vector Narrowing Integer Right Shift Instructions
6939//===----------------------------------------------------------------------===//
6940defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
6941defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
6942
6943//===----------------------------------------------------------------------===//
6944// 11.8. Vector Integer Comparison Instructions
6945//===----------------------------------------------------------------------===//
6946defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
6947defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
6948defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
6949defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
6950defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
6951defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;
6952
6953defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
6954defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
6955
6956// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
6957defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
6958defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
6959
6960defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
6961defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
6962
6963// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16 and
6964// non-zero. Zero can be .vx with x0. This avoids the user needing to know that
6965// there is no vmslt(u).vi instruction. Similar for vmsge(u).vx intrinsics
6966// using vmslt(u).vi.
6967defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE", simm5_plus1_nonzero>;
6968defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
6969
6970// We need to handle 0 for vmsge.vi using vmslt.vi because there is no vmsge.vx.
6971defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT", simm5_plus1>;
6972defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
6973
6974//===----------------------------------------------------------------------===//
6975// 11.9. Vector Integer Min/Max Instructions
6976//===----------------------------------------------------------------------===//
6977defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
6978defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
6979defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
6980defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
6981
6982//===----------------------------------------------------------------------===//
6983// 11.10. Vector Single-Width Integer Multiply Instructions
6984//===----------------------------------------------------------------------===//
6985defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
6986
6987defvar IntegerVectorsExceptI64 = !filter(vti, AllIntegerVectors,
6988                                         !ne(vti.SEW, 64));
6989defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
6990                         IntegerVectorsExceptI64>;
6991defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
6992                         IntegerVectorsExceptI64>;
6993defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
6994                         IntegerVectorsExceptI64>;
6995
6996// vmulh, vmulhu, vmulhsu are not included for EEW=64 in Zve64*.
6997defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
6998let Predicates = [HasVInstructionsFullMultiply] in {
6999  defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
7000                           I64IntegerVectors>;
7001  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
7002                           I64IntegerVectors>;
7003  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
7004                           I64IntegerVectors>;
7005}
7006
7007//===----------------------------------------------------------------------===//
7008// 11.11. Vector Integer Divide Instructions
7009//===----------------------------------------------------------------------===//
7010defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors, isSEWAware=1>;
7011defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors, isSEWAware=1>;
7012defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors, isSEWAware=1>;
7013defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors, isSEWAware=1>;
7014
7015//===----------------------------------------------------------------------===//
7016// 11.12. Vector Widening Integer Multiply Instructions
7017//===----------------------------------------------------------------------===//
7018defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
7019defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
7020defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
7021
7022//===----------------------------------------------------------------------===//
7023// 11.13. Vector Single-Width Integer Multiply-Add Instructions
7024//===----------------------------------------------------------------------===//
7025defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
7026defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
7027defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
7028defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
7029
7030//===----------------------------------------------------------------------===//
7031// 11.14. Vector Widening Integer Multiply-Add Instructions
7032//===----------------------------------------------------------------------===//
7033defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
7034defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
7035defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
7036defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
7037
7038//===----------------------------------------------------------------------===//
7039// 11.15. Vector Integer Merge Instructions
7040//===----------------------------------------------------------------------===//
7041defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
7042
7043//===----------------------------------------------------------------------===//
7044// 11.16. Vector Integer Move Instructions
7045//===----------------------------------------------------------------------===//
7046foreach vti = AllVectors in {
7047  let Predicates = GetVTypePredicates<vti>.Predicates in {
7048    def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
7049                                             (vti.Vector vti.RegClass:$rs1),
7050                                             VLOpFrag)),
7051              (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
7052               $passthru, $rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
7053
7054    // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
7055  }
7056}
7057
7058//===----------------------------------------------------------------------===//
7059// 12. Vector Fixed-Point Arithmetic Instructions
7060//===----------------------------------------------------------------------===//
7061
7062//===----------------------------------------------------------------------===//
7063// 12.1. Vector Single-Width Saturating Add and Subtract
7064//===----------------------------------------------------------------------===//
7065defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
7066defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
7067defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
7068defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
7069
7070//===----------------------------------------------------------------------===//
7071// 12.2. Vector Single-Width Averaging Add and Subtract
7072//===----------------------------------------------------------------------===//
7073defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaaddu", "PseudoVAADDU",
7074                            AllIntegerVectors>;
7075defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasubu", "PseudoVASUBU",
7076                            AllIntegerVectors>;
7077defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasub", "PseudoVASUB",
7078                            AllIntegerVectors>;
7079defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaadd", "PseudoVAADD",
7080                            AllIntegerVectors>;
7081
7082//===----------------------------------------------------------------------===//
7083// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
7084//===----------------------------------------------------------------------===//
7085defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7086                             IntegerVectorsExceptI64>;
7087// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
7088let Predicates = [HasVInstructionsFullMultiply] in
7089defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7090                             I64IntegerVectors>;
7091
7092//===----------------------------------------------------------------------===//
7093// 12.4. Vector Single-Width Scaling Shift Instructions
7094//===----------------------------------------------------------------------===//
7095defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssrl", "PseudoVSSRL",
7096                               AllIntegerVectors, uimm5>;
7097defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssra", "PseudoVSSRA",
7098                               AllIntegerVectors, uimm5>;
7099
7100//===----------------------------------------------------------------------===//
7101// 12.5. Vector Narrowing Fixed-Point Clip Instructions
7102//===----------------------------------------------------------------------===//
7103defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclipu", "PseudoVNCLIPU",
7104                               AllWidenableIntVectors>;
7105defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP",
7106                               AllWidenableIntVectors>;
7107
7108//===----------------------------------------------------------------------===//
7109// 13. Vector Floating-Point Instructions
7110//===----------------------------------------------------------------------===//
7111
7112//===----------------------------------------------------------------------===//
7113// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
7114//===----------------------------------------------------------------------===//
7115defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD",
7116                            AllFloatVectors>;
7117defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB",
7118                            AllFloatVectors>;
7119defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
7120
7121//===----------------------------------------------------------------------===//
7122// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
7123//===----------------------------------------------------------------------===//
7124defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD",
7125                            AllWidenableFloatVectors>;
7126defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB",
7127                            AllWidenableFloatVectors>;
7128defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD",
7129                            AllWidenableFloatVectors>;
7130defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB",
7131                            AllWidenableFloatVectors>;
7132
7133//===----------------------------------------------------------------------===//
7134// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
7135//===----------------------------------------------------------------------===//
7136defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL",
7137                            AllFloatVectors>;
7138defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfdiv", "PseudoVFDIV",
7139                            AllFloatVectors, isSEWAware=1>;
7140defm : VPatBinaryV_VX_RM<"int_riscv_vfrdiv", "PseudoVFRDIV",
7141                         AllFloatVectors, isSEWAware=1>;
7142
7143//===----------------------------------------------------------------------===//
7144// 13.5. Vector Widening Floating-Point Multiply
7145//===----------------------------------------------------------------------===//
7146defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL",
7147                            AllWidenableFloatVectors>;
7148
7149//===----------------------------------------------------------------------===//
7150// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
7151//===----------------------------------------------------------------------===//
7152defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
7153defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
7154defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
7155defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
7156defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
7157defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
7158defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
7159defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;
7160
7161//===----------------------------------------------------------------------===//
7162// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
7163//===----------------------------------------------------------------------===//
7164defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC",
7165                             AllWidenableFloatVectors>;
7166defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC",
7167                             AllWidenableFloatVectors>;
7168defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC",
7169                             AllWidenableFloatVectors>;
7170defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC",
7171                             AllWidenableFloatVectors>;
7172let Predicates = [HasStdExtZvfbfwma] in
7173defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmaccbf16", "PseudoVFWMACCBF16",
7174                              AllWidenableBFloatToFloatVectors>;
7175
7176//===----------------------------------------------------------------------===//
7177// 13.8. Vector Floating-Point Square-Root Instruction
7178//===----------------------------------------------------------------------===//
7179defm : VPatUnaryV_V_RM<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors, isSEWAware=1>;
7180
7181//===----------------------------------------------------------------------===//
7182// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
7183//===----------------------------------------------------------------------===//
7184defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;
7185
7186//===----------------------------------------------------------------------===//
7187// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
7188//===----------------------------------------------------------------------===//
7189defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;
7190
7191//===----------------------------------------------------------------------===//
7192// 13.11. Vector Floating-Point Min/Max Instructions
7193//===----------------------------------------------------------------------===//
7194defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
7195defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;
7196
7197//===----------------------------------------------------------------------===//
7198// 13.12. Vector Floating-Point Sign-Injection Instructions
7199//===----------------------------------------------------------------------===//
7200defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
7201defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
7202defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;
7203
7204//===----------------------------------------------------------------------===//
7205// 13.13. Vector Floating-Point Compare Instructions
7206//===----------------------------------------------------------------------===//
7207defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
7208defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
7209defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
7210defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
7211defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
7212defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
7213defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
7214defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;
7215
7216//===----------------------------------------------------------------------===//
7217// 13.14. Vector Floating-Point Classify Instruction
7218//===----------------------------------------------------------------------===//
7219defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
7220
7221//===----------------------------------------------------------------------===//
7222// 13.15. Vector Floating-Point Merge Instruction
7223//===----------------------------------------------------------------------===//
7224// We can use vmerge.vvm to support vector-vector vfmerge.
7225// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
7226// int_riscv_vmerge. Support both for compatibility.
7227foreach vti = AllFloatVectors in {
7228  let Predicates = GetVTypePredicates<vti>.Predicates in {
7229    defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
7230                                 vti.Vector,
7231                                 vti.Vector, vti.Vector, vti.Mask,
7232                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7233                                 vti.RegClass, vti.RegClass>;
7234    defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVMERGE", "VVM",
7235                                 vti.Vector,
7236                                 vti.Vector, vti.Vector, vti.Mask,
7237                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7238                                 vti.RegClass, vti.RegClass>;
7239    defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
7240                                 "V"#vti.ScalarSuffix#"M",
7241                                 vti.Vector,
7242                                 vti.Vector, vti.Scalar, vti.Mask,
7243                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7244                                 vti.RegClass, vti.ScalarRegClass>;
7245  }
7246}
7247
7248foreach fvti = AllFloatVectors in {
7249  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
7250  let Predicates = GetVTypePredicates<fvti>.Predicates in
7251  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
7252                                            (fvti.Vector fvti.RegClass:$rs2),
7253                                            (fvti.Scalar (fpimm0)),
7254                                            (fvti.Mask V0), VLOpFrag)),
7255            (instr fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
7256                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
7257}
7258
7259//===----------------------------------------------------------------------===//
7260// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
7261//===----------------------------------------------------------------------===//
7262defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
7263defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
7264defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
7265defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
7266defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">;
7267defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">;
7268
7269//===----------------------------------------------------------------------===//
7270// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
7271//===----------------------------------------------------------------------===//
7272defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
7273defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
7274defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
7275defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
7276defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
7277defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
7278defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;
7279defm : VPatConversionWF_VF_BF<"int_riscv_vfwcvtbf16_f_f_v",
7280                              "PseudoVFWCVTBF16_F_F">;
7281
7282//===----------------------------------------------------------------------===//
7283// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
7284//===----------------------------------------------------------------------===//
7285defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
7286defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
7287defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
7288defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
7289defm : VPatConversionVF_WI_RM <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
7290defm : VPatConversionVF_WI_RM <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
7291defvar WidenableFloatVectorsExceptF16 = !filter(fvtiToFWti, AllWidenableFloatVectors,
7292                                                !ne(fvtiToFWti.Vti.Scalar, f16));
7293defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
7294                           WidenableFloatVectorsExceptF16>;
7295// Define vfncvt.f.f.w for f16 when Zvfhmin is enable.
7296defvar F16WidenableFloatVectors = !filter(fvtiToFWti, AllWidenableFloatVectors,
7297                                          !eq(fvtiToFWti.Vti.Scalar, f16));
7298let Predicates = [HasVInstructionsF16Minimal] in
7299defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
7300                           F16WidenableFloatVectors>;
7301defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w",
7302                                 "PseudoVFNCVTBF16_F_F">;
7303defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
7304
7305//===----------------------------------------------------------------------===//
7306// 14. Vector Reduction Operations
7307//===----------------------------------------------------------------------===//
7308
7309//===----------------------------------------------------------------------===//
7310// 14.1. Vector Single-Width Integer Reduction Instructions
7311//===----------------------------------------------------------------------===//
7312defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
7313defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
7314defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
7315defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
7316defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
7317defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
7318defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
7319defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;
7320
7321//===----------------------------------------------------------------------===//
7322// 14.2. Vector Widening Integer Reduction Instructions
7323//===----------------------------------------------------------------------===//
7324defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
7325defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
7326
7327//===----------------------------------------------------------------------===//
7328// 14.3. Vector Single-Width Floating-Point Reduction Instructions
7329//===----------------------------------------------------------------------===//
7330defm : VPatReductionV_VS_RM<"int_riscv_vfredosum", "PseudoVFREDOSUM", IsFloat=1>;
7331defm : VPatReductionV_VS_RM<"int_riscv_vfredusum", "PseudoVFREDUSUM", IsFloat=1>;
7332defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", IsFloat=1>;
7333defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", IsFloat=1>;
7334
7335//===----------------------------------------------------------------------===//
7336// 14.4. Vector Widening Floating-Point Reduction Instructions
7337//===----------------------------------------------------------------------===//
7338defm : VPatReductionW_VS_RM<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", IsFloat=1>;
7339defm : VPatReductionW_VS_RM<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", IsFloat=1>;
7340
7341//===----------------------------------------------------------------------===//
7342// 15. Vector Mask Instructions
7343//===----------------------------------------------------------------------===//
7344
7345//===----------------------------------------------------------------------===//
7346// 15.1 Vector Mask-Register Logical Instructions
7347//===----------------------------------------------------------------------===//
7348defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
7349defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
7350defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
7351defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
7352defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
7353defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
7354defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
7355defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
7356
7357// pseudo instructions
7358defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
7359defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
7360
7361//===----------------------------------------------------------------------===//
7362// 15.2. Vector count population in mask vcpop.m
7363//===----------------------------------------------------------------------===//
7364defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;
7365
7366//===----------------------------------------------------------------------===//
7367// 15.3. vfirst find-first-set mask bit
7368//===----------------------------------------------------------------------===//
7369defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;
7370
7371//===----------------------------------------------------------------------===//
7372// 15.4. vmsbf.m set-before-first mask bit
7373//===----------------------------------------------------------------------===//
7374defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;
7375
7376//===----------------------------------------------------------------------===//
7377// 15.5. vmsif.m set-including-first mask bit
7378//===----------------------------------------------------------------------===//
7379defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;
7380
7381//===----------------------------------------------------------------------===//
7382// 15.6. vmsof.m set-only-first mask bit
7383//===----------------------------------------------------------------------===//
7384defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;
7385
7386//===----------------------------------------------------------------------===//
7387// 15.8.  Vector Iota Instruction
7388//===----------------------------------------------------------------------===//
7389defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;
7390
7391//===----------------------------------------------------------------------===//
7392// 15.9. Vector Element Index Instruction
7393//===----------------------------------------------------------------------===//
7394defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
7395
7396
7397//===----------------------------------------------------------------------===//
7398// 16. Vector Permutation Instructions
7399//===----------------------------------------------------------------------===//
7400
7401//===----------------------------------------------------------------------===//
7402// 16.1. Integer Scalar Move Instructions
7403//===----------------------------------------------------------------------===//
7404
7405foreach vti = NoGroupIntegerVectors in {
7406  let Predicates = GetVTypePredicates<vti>.Predicates in
7407  def : Pat<(XLenVT (riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2))),
7408            (PseudoVMV_X_S $rs2, vti.Log2SEW)>;
7409  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
7410}
7411
7412//===----------------------------------------------------------------------===//
7413// 16.3. Vector Slide Instructions
7414//===----------------------------------------------------------------------===//
7415defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
7416defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
7417defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
7418defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
7419
7420defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
7421defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
7422defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
7423defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
7424
7425//===----------------------------------------------------------------------===//
7426// 16.4. Vector Register Gather Instructions
7427//===----------------------------------------------------------------------===//
7428defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7429                                AllIntegerVectors, uimm5>;
7430defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7431                              eew=16, vtilist=AllIntegerVectors>;
7432
7433defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7434                                AllFloatVectors, uimm5>;
7435defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7436                              eew=16, vtilist=AllFloatVectors>;
7437//===----------------------------------------------------------------------===//
7438// 16.5. Vector Compress Instruction
7439//===----------------------------------------------------------------------===//
7440defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
7441defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
7442
7443// Include the non-intrinsic ISel patterns
7444include "RISCVInstrInfoVVLPatterns.td"
7445include "RISCVInstrInfoVSDPatterns.td"
7446