xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (revision 02e9120893770924227138ba49df1edb3896112a)
1//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file contains the required infrastructure to support code generation
10/// for the standard 'V' (Vector) extension, version 1.0.
11///
12/// This file is included from RISCVInstrInfoV.td
13///
14/// Overview of our vector instruction pseudos.  Many of the instructions
15/// have behavior which depends on the value of VTYPE.  Several core aspects of
16/// the compiler - e.g. register allocation - depend on fields in this
17/// configuration register.  The details of which fields matter differ by the
18/// specific instruction, but the common dimensions are:
19///
20/// LMUL/EMUL - Most instructions can write to differently sized register groups
21/// depending on LMUL.
22///
23/// Masked vs Unmasked - Many instructions which allow a mask disallow register
24/// overlap.  As a result, masked vs unmasked require different register
25/// allocation constraints.
26///
27/// Policy - For each of mask and tail policy, there are three options:
28/// * "Undisturbed" - As defined in the specification, required to preserve the
29/// exact bit pattern of inactive lanes.
30/// * "Agnostic" - As defined in the specification, required to either preserve
31/// the exact bit pattern of inactive lanes, or produce the bit pattern -1 for
32/// those lanes.  Note that each lane can make this choice independently.
33/// Instructions which produce masks (and only those instructions) also have the
34/// option of producing a result as-if VL had been VLMAX.
35/// * "Undefined" - The bit pattern of the inactive lanes is unspecified, and
36/// can be changed without impacting the semantics of the program.  Note that
37/// this concept does not exist in the specification, and requires source
38/// knowledge to be preserved.
39///
40/// SEW - Some instructions have semantics which depend on SEW.  This is
41/// relatively rare, and mostly impacts scheduling and cost estimation.
42///
43/// We have two techniques we use to represent the impact of these fields:
44/// * For fields which don't impact register classes, we largely use
45/// dummy operands on the pseudo instructions which convey information
46/// about the value of VTYPE.
47/// * For fields which do impact register classes (and a few bits of
48/// legacy - see policy discussion below), we define a family of pseudo
49/// instructions for each actual instruction.  Said differently, we encode
50/// each of the preceding fields which are relevant for a given instruction
51/// in the opcode space.
52///
/// Currently, the policy is represented via the following intrinsic families:
54/// * _MASK - Can represent all three policy states for both tail and mask.  If
55///   passthrough is IMPLICIT_DEF, then represents "undefined".  Otherwise,
56///   policy operand and tablegen flags drive the interpretation.  (If policy
///   operand is not present - there are a couple, though we're rapidly
///   removing them - a non-undefined policy defaults to "tail agnostic", and
///   "mask undisturbed".)  Since this is the only variant with a mask, all
///   other variants are "mask undefined".
61/// * Unsuffixed w/ both passthrough and policy operand. Can represent all
62///   three policy states.  If passthrough is IMPLICIT_DEF, then represents
63///   "undefined".  Otherwise, policy operand and tablegen flags drive the
64///   interpretation.
65/// * Unsuffixed w/o passthrough or policy operand -- Does not have a
66///   passthrough operand, and thus represents the "undefined" state.  Note
67///   that terminology in code frequently refers to these as "TA" which is
68///   confusing.  We're in the process of migrating away from this
69///   representation.
70/// * _TU w/o policy operand -- Has a passthrough operand, and always
71///   represents the tail undisturbed state.
72/// * _TU w/policy operand - Can represent all three policy states.  If
73///   passthrough is IMPLICIT_DEF, then represents "undefined".  Otherwise,
74///   policy operand and tablegen flags drive the interpretation.
75///
76//===----------------------------------------------------------------------===//
77
// vmv.x.s node: one integer scalar result, one integer vector operand
// (element 0 of the source is moved to the scalar result).
def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
// Reads the VLENB CSR (vector register length in bytes); no operands,
// produces an XLen-sized scalar.
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;

// Operand that is allowed to be a register or a 5 bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
def AVL : RegisterOperand<GPRNoX0> {
  let OperandNamespace = "RISCVOp";
  let OperandType = "OPERAND_AVL";
}

// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
// Complex pattern handled by "selectVLOp" in instruction selection; produces
// the single VL operand consumed by the pseudos.
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;

// Transform an immediate N into N - 1 (sign-extended), preserving the
// original value type.
def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Common values for the pseudos' vtype policy operand (tail/mask policy
// bits).  TA_MA sets both bits; TU_MU clears both.
defvar TAIL_AGNOSTIC = 1;
defvar TU_MU = 0;
defvar TA_MA = 3;
107
108//===----------------------------------------------------------------------===//
109// Utilities.
110//===----------------------------------------------------------------------===//
111
// Derives the name of the underlying real instruction from a pseudo's record
// name by stripping the "Pseudo" prefix and every suffix the pseudo families
// append to encode SEW/LMUL/mask-size/policy variants (e.g. "_M2", "_MASK",
// "_E32").  The "_F*"/"_VF*"/"_WF*" pairs keep the "_F"/"_VF"/"_WF" part and
// drop only the FP width.
class PseudoToVInst<string PseudoInst> {
  // Substitution pairs, applied in order via !foldl below; each entry is
  // [FromAffix, ToAffix].
  defvar AffixSubsts = [["Pseudo", ""],
                        ["_E64", ""],
                        ["_E32", ""],
                        ["_E16", ""],
                        ["_E8", ""],
                        ["_F64", "_F"],
                        ["_F32", "_F"],
                        ["_F16", "_F"],
                        ["_VF64", "_VF"],
                        ["_VF32", "_VF"],
                        ["_VF16", "_VF"],
                        ["_WF64", "_WF"],
                        ["_WF32", "_WF"],
                        ["_WF16", "_WF"],
                        ["_TU", ""],
                        ["_TIED", ""],
                        ["_MASK", ""],
                        ["_B64", ""],
                        ["_B32", ""],
                        ["_B16", ""],
                        ["_B8", ""],
                        ["_B4", ""],
                        ["_B2", ""],
                        ["_B1", ""],
                        ["_MF8", ""],
                        ["_MF4", ""],
                        ["_MF2", ""],
                        ["_M1", ""],
                        ["_M2", ""],
                        ["_M4", ""],
                        ["_M8", ""],
                        ["_SE", ""]
                       ];
  // Fold every substitution over the pseudo name to recover the base
  // instruction name.
  string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst,
                        !subst(AffixSubst[0], AffixSubst[1], Acc));
}
149
// This class describes information associated to the LMUL.
class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;    // Register class for a group of this LMUL.
  VReg wvrclass = wregclass;  // Register class at twice this LMUL (widening).
  VReg f8vrclass = f8regclass; // Register class at 1/8 of this LMUL.
  VReg f4vrclass = f4regclass; // Register class at 1/4 of this LMUL.
  VReg f2vrclass = f2regclass; // Register class at 1/2 of this LMUL.
  string MX = mx;             // Name suffix ("M1", "MF8", ...) used in pseudos.
  int octuple = oct;          // LMUL * 8, so fractional LMULs stay integral.
}

// Associate LMUL with tablegen records of register classes.
// Entries marked /*NoVReg*/VR are placeholders for widths/fractions that do
// not exist at that LMUL.
def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;

// Used to iterate over all possible LMULs.
defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
// For floating point which don't need MF8.
defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for widening and narrowing instructions as it doesn't contain M8.
defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
// Used for widening reductions. It can contain M8 because wider operands are
// scalar operands.
defvar MxListWRed = MxList;
// For floating point which don't need MF8.
defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];
// For widening floating-point Reduction as it doesn't contain MF8. It can
// contain M8 because wider operands are scalar operands.
defvar MxListFWRed = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf2
defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf4
defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf8
defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];
197
// LMULs legal for a given element width: larger EEWs exclude the smallest
// fractional LMULs.
class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}

// Per-SEW scalar floating-point info: the FPR class, the "F<sew>" name
// suffix, and the LMUL lists valid at that SEW.
class FPR_Info<int sew> {
  RegisterClass fprclass = !cast<RegisterClass>("FPR" # sew);
  string FX = "F" # sew;
  int SEW = sew;
  list<LMULInfo> MxList = MxSet<sew>.m;
  // LMULs for widening FP ops: none for f64 (no wider FP type), otherwise
  // everything but M8 (the result would exceed LMUL=8).
  list<LMULInfo> MxListFW = !if(!eq(sew, 64), [], !listremove(MxList, [V_M8]));
}

def SCALAR_F16 : FPR_Info<16>;
def SCALAR_F32 : FPR_Info<32>;
def SCALAR_F64 : FPR_Info<64>;

defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];

// Used for widening instructions. It excludes F64.
defvar FPListW = [SCALAR_F16, SCALAR_F32];

// NF (number of fields) values for segment ops legal at LMUL m:
// NF * LMUL must not exceed 8, so M8 has none and M1 and below allow 2-8.
class NFSet<LMULInfo m> {
  list<int> L = !cond(!eq(m.value, V_M8.value): [],
                      !eq(m.value, V_M4.value): [2],
                      !eq(m.value, V_M2.value): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}

// Maps an octuple LMUL (LMUL * 8) back to its "MX" name suffix.
class octuple_to_str<int octuple> {
  string ret = !cond(!eq(octuple, 1): "MF8",
                     !eq(octuple, 2): "MF4",
                     !eq(octuple, 4): "MF2",
                     !eq(octuple, 8): "M1",
                     !eq(octuple, 16): "M2",
                     !eq(octuple, 32): "M4",
                     !eq(octuple, 64): "M8");
}

// Pattern fragment matching the VLOp complex pattern on an AVL operand.
def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;

// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
// We can't use X0 register because the AVL operands use GPRNoX0.
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;

// List of EEW.
defvar EEWList = [8, 16, 32, 64];

// Register class for an NF-field segment at LMUL m.  Fractional LMULs use
// the whole-register (M1) segment class.
class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}
255
256//===----------------------------------------------------------------------===//
257// Vector register and vector group type information.
258//===----------------------------------------------------------------------===//
259
// Bundles everything needed to select and emit operations on one vector
// value type: the vector and mask MVTs, SEW, register class, LMUL, and the
// associated scalar type/register class.
class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR> {
  ValueType Vector = Vec;          // The vector value type.
  ValueType Mask = Mas;            // The mask type produced by comparisons.
  int SEW = Sew;                   // Element width in bits.
  int Log2SEW = !logtwo(Sew);
  VReg RegClass = Reg;             // Register class holding this type.
  LMULInfo LMul = M;
  ValueType Scalar = Scal;         // Matching scalar element type.
  RegisterClass ScalarRegClass = ScalarReg;
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  // Suffix selecting the scalar-operand pseudo variant ("X" for integer,
  // "F16"/"F32"/"F64" for FP scalars).
  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "F16",
                              !eq(Scal, f32) : "F32",
                              !eq(Scal, f64) : "F64");
}

// VTypeInfo for LMUL > 1 register groups; additionally records the LMUL=1
// vector type with the same SEW (used e.g. for whole-register pieces).
class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg> {
  ValueType VectorM1 = VecM1;
}
286
// Nested defsets enumerating every legal vector type, grouped so later
// multiclasses can iterate over exactly the subset they need (all vectors,
// integer vs. float, fractional vs. whole-register vs. grouped LMUL).
defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
        def VI8MF8: VTypeInfo<vint8mf8_t,  vbool64_t,  8, VR, V_MF8>;
        def VI8MF4: VTypeInfo<vint8mf4_t,  vbool32_t,  8, VR, V_MF4>;
        def VI8MF2: VTypeInfo<vint8mf2_t,  vbool16_t,  8, VR, V_MF2>;
        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
      }
      def VI8M1: VTypeInfo<vint8m1_t,   vbool8_t,   8, VR, V_M1>;
      def VI16M1: VTypeInfo<vint16m1_t,  vbool16_t, 16, VR, V_M1>;
      def VI32M1: VTypeInfo<vint32m1_t,  vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t,  vbool64_t, 64, VR, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      defset list<VTypeInfo> FractionalGroupFloatVectors = {
        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
        def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      }
      def VF16M1:  VTypeInfo<vfloat16m1_t,  vbool16_t, 16, VR, V_M1,  f16, FPR16>;
      def VF32M1:  VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1,  f32, FPR32>;
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                 VRM2, V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                 VRM4, V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                 VRM8, V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                 VRM2, V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
                                 VRM4, V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
                                 VRM8, V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                 VRM2, V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                 VRM4, V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
                                 VRM8, V_M8, f64, FPR64>;
    }
  }
}
358
// This functor is used to obtain the int vector type that has the same SEW and
// multiplier as the input parameter type.
class GetIntVTypeInfo<VTypeInfo vti> {
  // Equivalent integer vector type. Eg.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  // Relies on the "VF..."/"VI..." record naming convention above.
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}

// Type information for a mask (vbool) type, analogous to VTypeInfo.
class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // we assume SEW=1 and set corresponding LMUL. vsetvli insertion will
  // look for SEW=1 to optimize based on surrounding instructions.
  int SEW = 1;
  int Log2SEW = 0;
  LMULInfo LMul = M;
  string BX = Bx; // Appendix of mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}

defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
  // (e.g. vbool64_t: 8 / (1/8) = 64 pairs it with V_MF8.)
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}

// Pair of a vector type and its double-SEW (widened) counterpart.
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti> {
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

// Pair of a vector type and a narrower-SEW (fractional) counterpart.
class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti> {
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}
403
// Integer types paired with their double-SEW, double-LMUL widened type
// (used by widening integer instructions).
defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}

// Float types paired with their double-SEW widened type (f16->f32, f32->f64).
defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}

// Integer types paired with the half-SEW type (for zext/sext.vf2).
defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}

// Integer types paired with the quarter-SEW type (for zext/sext.vf4).
defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}

// Integer types paired with the eighth-SEW type (for zext/sext.vf8).
defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}

// Integer types paired with the double-SEW FLOAT type (for widening
// int-to-float conversions).
defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}
493
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVInstrInfo.h.
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
  // SEW = 0 is used to denote that the Pseudo is not SEW specific (or unknown).
  bits<8> SEW = 0;
}

// The actual table.
// Forward lookup: pseudo opcode -> its PseudoInfo.
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
  let PrimaryKeyEarlyOut = true;
}

// Reverse lookup: (base instruction, VLMul, SEW) -> pseudo.  VLMul comes
// from the instruction record itself (set by the pseudo classes below).
def RISCVVInversePseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr", "VLMul", "SEW"];
  let PrimaryKey = [ "BaseInstr", "VLMul", "SEW"];
  let PrimaryKeyName = "getBaseInfo";
  let PrimaryKeyEarlyOut = true;
}

// Intrinsic ID -> operand-position info for RVV intrinsics.
def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}

// Describes the relation of a masked pseudo to the unmasked variants.
//    Note that all masked variants (in this table) have exactly one
//    unmasked variant.  For all but compares, both the masked and
//    unmasked variant have a passthru and policy operand.  For compares,
//    neither has a policy op, and only the masked version has a passthru.
class RISCVMaskedPseudo<bits<4> MaskIdx> {
  Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
  // Derived by dropping the "_MASK" affix from this record's name.
  Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
  bits<4> MaskOpIdx = MaskIdx;     // Operand index of the mask ($vm).
}

// Masked pseudo -> (unmasked pseudo, mask operand index).
def RISCVMaskedPseudosTable : GenericTable {
  let FilterClass = "RISCVMaskedPseudo";
  let CppTypeName = "RISCVMaskedPseudoInfo";
  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "MaskOpIdx"];
  let PrimaryKey = ["MaskedPseudo"];
  let PrimaryKeyName = "getMaskedPseudoInfo";
}
550
// Key record for unit-stride/strided/fault-only-first vector load pseudos;
// mixed into each load pseudo so it lands in RISCVVLETable below.
class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<1> Masked = M;       // Has a mask operand.
  bits<1> Strided = Str;    // Strided (vlse) vs unit-stride.
  bits<1> FF = F;           // Fault-only-first (vle<eew>ff).
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Secondary index into RISCVMaskedPseudosTable: look up the masked variant
// from the unmasked pseudo.
def lookupMaskedIntrinsicByUnmasked : SearchIndex {
  let Table = RISCVMaskedPseudosTable;
  let Key = ["UnmaskedPseudo"];
}

// (Masked, Strided, FF, Log2SEW, LMUL) -> load pseudo.
def RISCVVLETable : GenericTable {
  let FilterClass = "RISCVVLE";
  let CppTypeName = "VLEPseudo";
  let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLEPseudo";
}
572
// Key record for unit-stride/strided vector store pseudos (no FF bit, since
// fault-only-first exists only for loads).
class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// (Masked, Strided, Log2SEW, LMUL) -> store pseudo.
def RISCVVSETable : GenericTable {
  let FilterClass = "RISCVVSE";
  let CppTypeName = "VSEPseudo";
  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSEPseudo";
}

// Shared key record for indexed load/store pseudos.  The index vector may
// have a different LMUL (IndexLMUL) than the data vector (LMUL).
class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<1> Masked = M;
  bits<1> Ordered = O;      // Ordered (vluxei/vsuxei vs vloxei/vsoxei) flag.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Distinct filter classes so loads and stores get separate tables.
class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;

// Common table layout for the indexed load/store tables below.
class RISCVVLX_VSXTable : GenericTable {
  let CppTypeName = "VLX_VSXPseudo";
  let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}

def RISCVVLXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVLX";
  let PrimaryKeyName = "getVLXPseudo";
}

def RISCVVSXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVSX";
  let PrimaryKeyName = "getVSXPseudo";
}
618
// Key record for segment load pseudos; NF is the number of fields per
// segment.
class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;           // Fault-only-first segment load.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// (NF, Masked, Strided, FF, Log2SEW, LMUL) -> segment load pseudo.
def RISCVVLSEGTable : GenericTable {
  let FilterClass = "RISCVVLSEG";
  let CppTypeName = "VLSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLSEGPseudo";
}

// Key record for indexed segment load pseudos.
class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;   // LMUL of the index vector.
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// (NF, Masked, Ordered, Log2SEW, LMUL, IndexLMUL) -> indexed segment load.
def RISCVVLXSEGTable : GenericTable {
  let FilterClass = "RISCVVLXSEG";
  let CppTypeName = "VLXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVLXSEGPseudo";
}

// Key record for segment store pseudos.
class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// (NF, Masked, Strided, Log2SEW, LMUL) -> segment store pseudo.
def RISCVVSSEGTable : GenericTable {
  let FilterClass = "RISCVVSSEG";
  let CppTypeName = "VSSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSSEGPseudo";
}

// Key record for indexed segment store pseudos.
class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// (NF, Masked, Ordered, Log2SEW, LMUL, IndexLMUL) -> indexed segment store.
def RISCVVSXSEGTable : GenericTable {
  let FilterClass = "RISCVVSXSEG";
  let CppTypeName = "VSXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVSXSEGPseudo";
}
689
690//===----------------------------------------------------------------------===//
691// Helpers to define the different pseudo instructions.
692//===----------------------------------------------------------------------===//
693
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
// Maps a vector register class to its variant excluding v0; classes with no
// NoV0 variant are returned unchanged.
class GetVRegNoV0<VReg VRegClass> {
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
                 true : VRegClass);
}

// Base class for a vector pseudo: records its base instruction, LMUL, and
// optionally SEW (0 = not SEW specific).
class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins, int sew = 0> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
  let SEW = sew;
}

// Predicates required for a given vector type: FP types need the matching
// FP vector-instruction feature, SEW=64 integers need I64 support.
class GetVTypePredicates<VTypeInfo vti> {
  list<Predicate> Predicates = !cond(!eq(vti.Scalar, f16) : [HasVInstructionsF16],
                                     !eq(vti.Scalar, f32) : [HasVInstructionsAnyF],
                                     !eq(vti.Scalar, f64) : [HasVInstructionsF64],
                                     !eq(vti.SEW, 64) : [HasVInstructionsI64],
                                     true : [HasVInstructions]);
}
731
// Unit-stride load, unmasked.  The passthru operand ($dest) is tied to the
// result so tail-undisturbed semantics can be modeled; VL/SEW/policy are
// carried as explicit operands.
class VPseudoUSLoadNoMask<VReg RetClass, int EEW> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
746
// Unit-stride load, masked.  Destination uses the NoV0 class because it may
// not overlap the mask register v0; $merge is tied to $rd for the
// undisturbed policies.
class VPseudoUSLoadMask<VReg RetClass, int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
763
// Unit-stride fault-only-first load, unmasked.  Produces an extra GPR output
// ($vl) for the VL value written back by the hardware on a trap-suppressed
// element.
class VPseudoUSLoadFFNoMask<VReg RetClass, int EEW> :
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
                  ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
778
// Unit-stride fault-only-first load, masked.  Same extra GPR $vl output as
// the unmasked form; destination avoids v0 and is tied to $merge.
class VPseudoUSLoadFFMask<VReg RetClass, int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1,
                   VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
795
// Strided load, unmasked.  $rs2 is the byte stride (GPR).
class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
810
// Strided load, masked.  Destination avoids v0; $rs2 is the byte stride.
class VPseudoSLoadMask<VReg RetClass, int EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1, GPR:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
827
// Indexed load, unmasked.  LMUL here is the index operand's EMUL (the data
// EMUL lives in VLMul).  EarlyClobber adds @earlyclobber on $rd for the
// cases where the destination may not overlap the index register group.
class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                         bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
              ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
}
843
// Indexed load, masked.  Same EarlyClobber handling as the unmasked form;
// destination additionally avoids v0 and is tied to $merge.
class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                       bit Ordered, bit EarlyClobber>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1, IdxClass:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
861
// Unit-stride store, unmasked.  Stores have no destination and therefore no
// passthru/policy operands — only VL and SEW.
class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
873
// Unit-stride store, masked.
class VPseudoUSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
885
// Strided store, unmasked.  $rs2 is the byte stride (GPR).
class VPseudoSStoreNoMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
897
// Strided store, masked.
class VPseudoSStoreMask<VReg StClass, int EEW>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
909
// Nullary (no vector source) operation, unmasked, with merge/policy support.
class VPseudoNullaryNoMask<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins RegClass:$merge, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
922
// Nullary (no vector source) operation, masked.  Destination avoids v0 and
// is tied to $merge for the undisturbed policies.
class VPseudoNullaryMask<VReg RegClass>:
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
              ixlenimm:$sew, ixlenimm:$policy), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  // NOTE: was `Constraints ="..."` — normalized spacing to match every
  // sibling class in this file.
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let UsesMaskPolicy = 1;
  let HasVecPolicyOp = 1;
}
936
// Nullary for pseudo instructions. They are expanded in
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
       : Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
       RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
  // Just fill a corresponding real v-inst to pass tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
}
951
// Unary operation, unmasked, with merge/policy support.  Extra register
// constraints (e.g. @earlyclobber) are prepended via the Constraint
// parameter and joined with the tie using !interleave.
class VPseudoUnaryNoMask<DAGOperand RetClass, DAGOperand OpClass,
                         string Constraint = ""> :
      Pseudo<(outs RetClass:$rd),
        (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew,
             ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
966
// Unary operation, unmasked, carrying an explicit rounding-mode operand
// ($rm).  UsesVXRM = 0 marks the rounding mode as FP (frm) rather than
// fixed-point (vxrm).
class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass, DAGOperand OpClass,
                         string Constraint = ""> :
      Pseudo<(outs RetClass:$rd),
        (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew,
             ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
983
// Unary operation, masked.  Destination avoids v0; tied to $merge.
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
998
// Unary operation, masked, with an explicit FP rounding-mode operand
// (UsesVXRM = 0).
class VPseudoUnaryMaskRoundingMode<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, ixlenimm:$rm,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1016
// Masked unary operation expanded via a custom inserter
// (usesCustomInserter = 1).  NOTE(review): unlike the sibling classes this
// one does not inherit RISCVVPseudo — presumably intentional for
// custom-inserted pseudos; confirm before relying on RISCVVPseudo-keyed
// queries here.
class VPseudoUnaryMask_NoExcept<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2, VMaskOp:$vm,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let usesCustomInserter = 1;
}
1031
// Unmasked unary operation with a static FRM (FP rounding mode) immediate,
// expanded via a custom inserter.  Does not inherit RISCVVPseudo (matches
// the other *_FRM/_NoExcept custom-inserted classes).
class VPseudoUnaryNoMask_FRM<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm, AVL:$vl,
                    ixlenimm:$sew, ixlenimm:$policy), []> {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let usesCustomInserter = 1;
}
1045
// Masked unary operation with a static FRM immediate, expanded via a custom
// inserter.  Destination avoids v0 and is tied to $merge.
class VPseudoUnaryMask_FRM<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, ixlenimm:$frm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let usesCustomInserter = 1;
}
1060
// Unary operation producing a scalar GPR result (e.g. from a mask source),
// unmasked.  No merge/policy: scalar outputs have no tail to preserve.
class VPseudoUnaryNoMaskGPROut :
        Pseudo<(outs GPR:$rd),
               (ins VR:$rs2, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1071
// Unary operation producing a scalar GPR result, masked.
class VPseudoUnaryMaskGPROut:
        Pseudo<(outs GPR:$rd),
               (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1082
// Mask can be V0~V31
// Unary operation whose mask operand is a full VR (any of v0-v31), not the
// usual v0-only VMaskOp.  Destination is earlyclobber and tied to $merge.
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2,
                  VR:$vm, AVL:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1099
// Binary operation, unmasked, no merge operand (tail policy not
// representable here — see VPseudoBinaryNoMaskTU for the merge-carrying
// variant).
class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1114
// Binary operation, unmasked, with a merge operand and a policy operand so
// tail-undisturbed can be modeled.
class VPseudoBinaryNoMaskTU<VReg RetClass,
                            VReg Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
               ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
1131
// Binary operation, unmasked, with an explicit rounding-mode operand.
// UsesVXRM_ selects fixed-point vxrm (1, default) vs FP frm (0) semantics.
// NOTE(review): unlike nearly every sibling class, `hasSideEffects = 0` is
// not set here — confirm whether that omission is intentional.
class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
                                      VReg Op1Class,
                                      DAGOperand Op2Class,
                                      string Constraint,
                                      int UsesVXRM_ = 1> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = UsesVXRM_;
}
1150
// Binary operation, masked, with policy and rounding-mode operands.
// NOTE(review): like VPseudoBinaryNoMaskRoundingMode, `hasSideEffects = 0`
// is not set here, diverging from the other masked classes — confirm
// whether intentional.
class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
                                          RegisterClass Op1Class,
                                          DAGOperand Op2Class,
                                          string Constraint,
                                          int UsesVXRM_> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
                     ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = UsesVXRM_;
}
1172
// Special version of VPseudoBinaryNoMask where we pretend the first source is
// tied to the destination.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryNoMask<VReg RetClass,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew,
                    ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  // $rs2 doubles as the passthru, hence the tie is to the source rather
  // than a separate $merge operand.
  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let isConvertibleToThreeAddress = 1;
  let IsTiedPseudo = 1;
}
1193
// Tied-source variant of VPseudoBinaryNoMaskRoundingMode: $rs2 doubles as
// the passthru ("$rd = $rs2").  FP rounding mode (UsesVXRM = 0).
class VPseudoTiedBinaryNoMaskRoundingMode<VReg RetClass,
                                          DAGOperand Op2Class,
                                          string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs2, Op2Class:$rs1,
                    ixlenimm:$rm,
                    AVL:$vl, ixlenimm:$sew,
                    ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let isConvertibleToThreeAddress = 1;
  let IsTiedPseudo = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1215
// Indexed store, unmasked.  LMUL is the index operand's EMUL.
class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1228
// Indexed store, masked.
class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                        bit Ordered>:
      Pseudo<(outs),
              (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1241
// Binary operation, masked, WITHOUT a policy operand (contrast
// VPseudoBinaryMaskPolicy below).
class VPseudoBinaryMask<VReg RetClass,
                        RegisterClass Op1Class,
                        DAGOperand Op2Class,
                        string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1258
// Binary operation, masked, with an explicit policy operand.
class VPseudoBinaryMaskPolicy<VReg RetClass,
                              RegisterClass Op1Class,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1277
// Ternary operation, masked, with a policy operand.  Note: no
// UsesMaskPolicy here — the merge operand is a true source for ternary ops,
// not just a maskedoff value.
class VPseudoTernaryMaskPolicy<VReg RetClass,
                               RegisterClass Op1Class,
                               DAGOperand Op2Class,
                               string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
1295
// Ternary operation, masked, with policy and FP rounding-mode operands
// (UsesVXRM = 0).
class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
                                           RegisterClass Op1Class,
                                           DAGOperand Op2Class,
                                           string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm,
                     ixlenimm:$rm,
                     AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1317
// Like VPseudoBinaryNoMask, but output can be V0.
// Used for mask-producing ops (e.g. comparisons), which are exempt from the
// no-overlap-with-v0 destination restriction.
class VPseudoBinaryMOutNoMask<VReg RetClass,
                              VReg Op1Class,
                              DAGOperand Op2Class,
                              string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1333
// Like VPseudoBinaryMask, but output can be V0.
class VPseudoBinaryMOutMask<VReg RetClass,
                            RegisterClass Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs RetClass:$rd),
                (ins RetClass:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let UsesMaskPolicy = 1;
}
1352
// Special version of VPseudoBinaryMask where we pretend the first source is
// tied to the destination so we can workaround the earlyclobber constraint.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryMask<VReg RetClass,
                            DAGOperand Op2Class,
                            string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op2Class:$rs1,
                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let IsTiedPseudo = 1;
}
1374
// Tied-source masked binary operation with an FP rounding-mode operand
// (UsesVXRM = 0).
class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
                                        DAGOperand Op2Class,
                                        string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op2Class:$rs1,
                     VMaskOp:$vm,
                     ixlenimm:$rm,
                     AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let IsTiedPseudo = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1397
// Binary operation with an optional carry-in operand (VMV0:$carry, pinned
// to v0).  CarryIn selects between the two operand lists at instantiation
// time.  No merge operand in this variant.
class VPseudoBinaryCarryIn<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           LMULInfo MInfo,
                           bit CarryIn,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let VLMul = MInfo.value;
}
1418
// Carry-in binary operation with a merge operand tied to the destination.
// Explicitly has no policy operand (HasVecPolicyOp = 0).
class VPseudoTiedBinaryCarryIn<VReg RetClass,
                               VReg Op1Class,
                               DAGOperand Op2Class,
                               LMULInfo MInfo,
                               bit CarryIn,
                               string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                       ixlenimm:$sew),
                  (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 0;
  let VLMul = MInfo.value;
}
1440
// Ternary operation (e.g. FMA-style), unmasked, no policy operand.  The
// accumulator $rs3 is tied to the destination.
class VPseudoTernaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1457
// Ternary operation, unmasked, with a policy operand.
class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
                                     string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1475
// Ternary operation, unmasked, with policy and FP rounding-mode operands
// (UsesVXRM = 0).
class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
                                                 RegisterClass Op1Class,
                                                 DAGOperand Op2Class,
                                                 string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1495
// Unit-stride segment load (NF fields), unmasked.
class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
1510
// Unit-stride segment load, masked.  Destination avoids v0.
class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1526
// Unit-stride fault-only-first segment load, unmasked.  Extra GPR $vl
// output carries the hardware-updated VL, as in VPseudoUSLoadFFNoMask.
class VPseudoUSSegLoadFFNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
                  ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
1541
// Unit-stride fault-only-first segment load, masked.
class VPseudoUSSegLoadFFMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1557
// Strided segment load, unmasked.  $offset is the byte stride (GPR).
class VPseudoSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl,
             ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $merge";
}
1572
// Strided segment load, masked.
class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1589
// Indexed segment load, unmasked.  LMUL is the index operand's EMUL.
class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                            bits<4> NF, bit Ordered>:
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
1607
// Masked indexed segment load pseudo; Ordered selects ordered vs unordered
// index form.  Destination excludes v0 because the mask occupies v0.
class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bits<4> NF, bit Ordered>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1627
// Unmasked unit-stride (Strided=0) segment store pseudo; $rd is the stored
// register group (an input — stores have no outputs).
class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1639
// Masked unit-stride (Strided=0) segment store pseudo.
class VPseudoUSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1652
// Unmasked strided (Strided=1) segment store pseudo; $offset is the GPR
// stride operand.
class VPseudoSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, AVL:$vl,
                  ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1664
// Masked strided (Strided=1) segment store pseudo; $offset is the GPR
// stride operand.
class VPseudoSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1677
// Unmasked indexed segment store pseudo; Ordered selects ordered vs unordered
// index form.  LMUL is the EMUL of the index operand.
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                             bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, IdxClass:$index,
                  AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1691
// Masked indexed segment store pseudo; Ordered selects ordered vs unordered
// index form.
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
                           bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, IdxClass:$index,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1705
// Defines unmasked and masked unit-stride load pseudos for every legal
// (EEW, LMUL) pair, attaching VLE scheduling info.  The mask operand of the
// "_MASK" variant is at operand index 2 (RISCVMaskedPseudo<MaskIdx=2>).
multiclass VPseudoUSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo :
          VPseudoUSLoadNoMask<vreg, eew>,
          VLESched<LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" :
          VPseudoUSLoadMask<vreg, eew>,
          RISCVMaskedPseudo<MaskIdx=2>,
          VLESched<LInfo>;
      }
    }
  }
}
1723
// Defines unmasked and masked fault-only-first load pseudos ("E<eew>FF")
// for every legal (EEW, LMUL) pair, with VLF scheduling info.
multiclass VPseudoFFLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "FF_V_" # LInfo:
          VPseudoUSLoadFFNoMask<vreg, eew>,
          VLFSched<LInfo>;
        def "E" # eew # "FF_V_" # LInfo # "_MASK":
          VPseudoUSLoadFFMask<vreg, eew>,
          RISCVMaskedPseudo<MaskIdx=2>,
          VLFSched<LInfo>;
      }
    }
  }
}
1741
// Defines mask-register load pseudos (EEW=1, one per mask type).  Reuses the
// unit-stride load class with the VR register class and VLDM scheduling.
multiclass VPseudoLoadMask {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVLDM_MX = !cast<SchedWrite>("WriteVLDM_" # mx);
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, EEW=1>,
        Sched<[WriteVLDM_MX, ReadVLDX]>;
    }
  }
}
1752
// Defines unmasked and masked strided load pseudos for every legal
// (EEW, LMUL) pair, with VLS scheduling info.  Mask operand index is 3
// (one later than unit-stride, because of the stride operand).
multiclass VPseudoSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
                                        VLSSched<eew, LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" :
          VPseudoSLoadMask<vreg, eew>,
          RISCVMaskedPseudo<MaskIdx=3>,
          VLSSched<eew, LInfo>;
      }
    }
  }
}
1769
// Defines indexed load pseudos (ordered or unordered per Ordered) for every
// (index EEW, data EEW, data LMUL) combination whose index EMUL
// (= idxEEW * dataLMUL / dataEEW) is representable, i.e. octuple in [1, 64]
// (MF8 .. M8).
multiclass VPseudoILoad<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple =
          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          // The overlap constraint is only needed when data and index EEW
          // differ (same EEW means same layout, so overlap is legal).
          defvar HasConstraint = !ne(dataEEW, idxEEW);
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = dataEMUL.value in {
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
              VPseudoILoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint>,
              VLXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
              VPseudoILoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint>,
              RISCVMaskedPseudo<MaskIdx=3>,
              VLXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
          }
        }
      }
    }
  }
}
1800
// Defines unmasked and masked unit-stride store pseudos for every legal
// (EEW, LMUL) pair, with VSE scheduling info.
multiclass VPseudoUSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
                                        VSESched<LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
                                                  VSESched<LInfo>;
      }
    }
  }
}
1815
// Defines mask-register store pseudos (EEW=1, one per mask type), reusing the
// unit-stride store class with the VR register class and VSTM scheduling.
multiclass VPseudoStoreMask {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVSTM_MX = !cast<SchedWrite>("WriteVSTM_" # mx);
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, EEW=1>,
        Sched<[WriteVSTM_MX, ReadVSTX]>;
    }
  }
}
1826
// Defines unmasked and masked strided store pseudos for every legal
// (EEW, LMUL) pair, with VSS scheduling info.
multiclass VPseudoSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
                                        VSSSched<eew, LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
                                                  VSSSched<eew, LInfo>;
      }
    }
  }
}
1841
// Defines indexed store pseudos (ordered or unordered per Ordered), using the
// same index-EMUL derivation and clamping as VPseudoILoad.  Stores need no
// overlap constraint, so no HasConstraint computation here.
multiclass VPseudoIStore<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple =
          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = dataEMUL.value in {
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
              VPseudoIStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
              VSXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
              VPseudoIStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
              VSXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
          }
        }
      }
    }
  }
}
1870
// Mask population-count pseudos (VMPopV scheduling resources): unary mask
// operations with a GPR result, one unmasked and one masked def per mask type.
multiclass VPseudoVPOP_M {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVMPopV_MX = !cast<SchedWrite>("WriteVMPopV_" # mx);
    defvar ReadVMPopV_MX = !cast<SchedRead>("ReadVMPopV_" # mx);
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMaskGPROut,
                           Sched<[WriteVMPopV_MX, ReadVMPopV_MX, ReadVMPopV_MX]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
                                     Sched<[WriteVMPopV_MX, ReadVMPopV_MX, ReadVMPopV_MX]>;
    }
  }
}
1884
// Find-first-set mask pseudos (VMFFSV scheduling resources): unary mask
// operations with a GPR result, unmasked and masked variants per mask type.
multiclass VPseudoV1ST_M {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVMFFSV_MX = !cast<SchedWrite>("WriteVMFFSV_" # mx);
    defvar ReadVMFFSV_MX = !cast<SchedRead>("ReadVMFFSV_" # mx);
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMaskGPROut,
                           Sched<[WriteVMFFSV_MX, ReadVMFFSV_MX, ReadVMFFSV_MX]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
                                     Sched<[WriteVMFFSV_MX, ReadVMFFSV_MX, ReadVMFFSV_MX]>;
    }
  }
}
1898
// Mask set-first pseudos (VMSFSV scheduling resources): mask-in, mask-out
// unary operations.  earlyclobber because destination may not overlap source.
multiclass VPseudoVSFS_M {
  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVMSFSV_MX = !cast<SchedWrite>("WriteVMSFSV_" # mx);
    defvar ReadVMSFSV_MX = !cast<SchedRead>("ReadVMSFSV_" # mx);
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>,
                           Sched<[WriteVMSFSV_MX, ReadVMSFSV_MX, ReadVMask]>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
                                     Sched<[WriteVMSFSV_MX, ReadVMSFSV_MX, ReadVMask]>;
    }
  }
}
1913
// Nullary index-generation pseudos (VMIdxV scheduling resources), one
// unmasked and one masked def per LMUL; no vector source operands.
multiclass VPseudoVID_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVMIdxV_MX = !cast<SchedWrite>("WriteVMIdxV_" # mx);
    defvar ReadVMIdxV_MX = !cast<SchedRead>("ReadVMIdxV_" # mx);

    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>,
                         Sched<[WriteVMIdxV_MX, ReadVMask]>;
      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>,
                                   RISCVMaskedPseudo<MaskIdx=1>,
                                   Sched<[WriteVMIdxV_MX, ReadVMask]>;
    }
  }
}
1929
// Nullary mask pseudos expanded later into BaseInst # "_MM" (same-named class
// defined elsewhere in this file); one def per mask type with VMALUV
// scheduling.
multiclass VPseudoNullaryPseudoM <string BaseInst> {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVMALUV_MX = !cast<SchedWrite>("WriteVMALUV_" # mx);
    defvar ReadVMALUV_MX = !cast<SchedRead>("ReadVMALUV_" # mx);

    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">,
        Sched<[WriteVMALUV_MX, ReadVMALUV_MX, ReadVMALUV_MX]>;
    }
  }
}
1942
// Iota pseudos (VMIotV scheduling resources): mask source, vector-group
// destination; earlyclobber because destination may not overlap the source.
multiclass VPseudoVIOT_M {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVMIotV_MX = !cast<SchedWrite>("WriteVMIotV_" # mx);
    defvar ReadVMIotV_MX = !cast<SchedRead>("ReadVMIotV_" # mx);
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
                       Sched<[WriteVMIotV_MX, ReadVMIotV_MX, ReadVMask]>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
                                 RISCVMaskedPseudo<MaskIdx=2>,
                                 Sched<[WriteVMIotV_MX, ReadVMIotV_MX, ReadVMask]>;
    }
  }
}
1958
// Compress pseudos (VCompressV scheduling resources), defined per (LMUL, SEW)
// because their scheduling info is SEW-dependent; the "_E<sew>" suffix keys
// the SchedWrite/SchedRead lookups.
multiclass VPseudoVCPR_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar sews = SchedSEWSet<mx>.val;
    let VLMul = m.value in
      foreach e = sews in {
        defvar suffix = "_" # m.MX # "_E" # e;
        defvar WriteVCompressV_MX_E = !cast<SchedWrite>("WriteVCompressV" # suffix);
        defvar ReadVCompressV_MX_E = !cast<SchedRead>("ReadVCompressV" # suffix);

        let SEW = e in
        def _VM # suffix : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
                           Sched<[WriteVCompressV_MX_E, ReadVCompressV_MX_E, ReadVCompressV_MX_E]>;
      }
  }
}
1975
// Core binary-op helper: defines the unmasked (tail-undisturbed-capable) and
// masked-with-policy variants.  When sew != 0 the def names carry an "_E<sew>"
// suffix so SEW-specialized pseudos can coexist.  Mask operand index is 3.
multiclass VPseudoBinary<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
                         LMULInfo MInfo,
                         string Constraint = "",
                         int sew = 0> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
                                       Constraint>;
    def suffix # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
                                                   Constraint>,
                           RISCVMaskedPseudo<MaskIdx=3>;
  }
}
1991
// Like VPseudoBinary but for operations that take a rounding-mode operand;
// UsesVXRM selects the fixed-point (VXRM) form vs the FP (FRM) form.
multiclass VPseudoBinaryRoundingMode<VReg RetClass,
                                     VReg Op1Class,
                                     DAGOperand Op2Class,
                                     LMULInfo MInfo,
                                     string Constraint = "",
                                     int sew = 0,
                                     int UsesVXRM = 1> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
                                                 Constraint, UsesVXRM>;
    def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode<RetClass,
                                                               Op1Class,
                                                               Op2Class,
                                                               Constraint,
                                                               UsesVXRM>,
                           RISCVMaskedPseudo<MaskIdx=3>;
  }
}
2011
2012
// Binary ops producing a mask result.  The masked variant forces tail
// agnostic (ForceTailAgnostic): mask-producing instructions always treat the
// tail as agnostic per the class of mask-writing instructions.
multiclass VPseudoBinaryM<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          LMULInfo MInfo,
                          string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryMOutNoMask<RetClass, Op1Class, Op2Class,
                                                 Constraint>;
    let ForceTailAgnostic = true in
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
                                                         Op2Class, Constraint>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}
2027
// Like VPseudoBinary, but the second vector operand uses a different EMUL
// (e.g. index operand of vrgatherei16); def names carry both the data LMUL
// suffix and the operand EMUL suffix.
multiclass VPseudoBinaryEmul<VReg RetClass,
                             VReg Op1Class,
                             DAGOperand Op2Class,
                             LMULInfo lmul,
                             LMULInfo emul,
                             string Constraint = "",
                             int sew = 0> {
  let VLMul = lmul.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
    def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
                                                       Constraint>;
    def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
                                                                          Constraint>,
                                                  RISCVMaskedPseudo<MaskIdx=3>;
  }
}
2044
// Binary ops whose destination is tied to the first source ("_TIED" defs);
// used alongside the untied forms for widening W_WV-style patterns.
multiclass VPseudoTiedBinary<VReg RetClass,
                             DAGOperand Op2Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
                                                          Constraint>;
    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
                                                         Constraint>;
  }
}
2056
// Tied binary ops (destination tied to the first source) that additionally
// carry a rounding-mode operand; rounding-mode counterpart of
// VPseudoTiedBinary.
multiclass VPseudoTiedBinaryRoundingMode<VReg RetClass,
                                         DAGOperand Op2Class,
                                         LMULInfo MInfo,
                                         string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_TIED":
      VPseudoTiedBinaryNoMaskRoundingMode<RetClass, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK_TIED" :
      VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class, Constraint>;
  }
}
2068
2069
// Vector-vector form: all three operands use the same LMUL register class.
multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0> {
  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew>;
}
2073
// Vector-vector form with a rounding-mode operand (VXRM by default).
multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = ""> {
  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}
2077
// Identical body to VPseudoBinaryV_VV; kept separate because its
// instantiation sites iterate over MxListF (floating-point LMULs).
// Body is identical to VPseudoBinaryV_VV; kept as a distinct name for the
// floating-point instantiation sites.
multiclass VPseudoBinaryFV_VV<LMULInfo m, string Constraint = "", int sew = 0> {
  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew>;
}
2082
// FP vector-vector form with a rounding-mode operand; UsesVXRM=0 selects the
// FP (FRM) rounding mode rather than the fixed-point VXRM.
multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, string Constraint = "", int sew = 0> {
  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
                                       Constraint, sew,
                                       UsesVXRM=0>;
}
2088
// vrgather-style pseudos with a fixed index EEW: for each data LMUL and SEW,
// derive the index EMUL (= lmul * eew / sew), clamp to the representable
// octuple range [1, 64], and define SEW-suffixed pseudos with per-SEW
// VRGatherVV scheduling info.
multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    foreach sew = EEWList in {
      defvar dataEMULOctuple = m.octuple;
      // emul = lmul * eew / sew
      defvar idxEMULOctuple = !srl(!mul(dataEMULOctuple, eew), !logtwo(sew));
      if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
        defvar emulMX = octuple_to_str<idxEMULOctuple>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        defvar sews = SchedSEWSet<mx>.val;
        foreach e = sews in {
          defvar WriteVRGatherVV_MX_E = !cast<SchedWrite>("WriteVRGatherVV_" # mx # "_E" # e);
          defvar ReadVRGatherVV_data_MX_E = !cast<SchedRead>("ReadVRGatherVV_data_" # mx # "_E" # e);
          defvar ReadVRGatherVV_index_MX_E = !cast<SchedRead>("ReadVRGatherVV_index_" # mx # "_E" # e);
          defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint, e>,
                     Sched<[WriteVRGatherVV_MX_E, ReadVRGatherVV_data_MX_E, ReadVRGatherVV_index_MX_E]>;
        }
      }
    }
  }
}
2111
// Vector-scalar form: second source is a GPR.
multiclass VPseudoBinaryV_VX<LMULInfo m, string Constraint = "", int sew = 0> {
  defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint, sew>;
}
2115
// Vector-scalar form with a rounding-mode operand (VXRM by default).
multiclass VPseudoBinaryV_VX_RM<LMULInfo m, string Constraint = ""> {
  defm "_VX" : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
2119
// Integer slide1up/slide1down-style pseudos: vector-scalar binary ops per
// LMUL with VISlide1X scheduling resources.
multiclass VPseudoVSLD1_VX<string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVISlide1X_MX = !cast<SchedWrite>("WriteVISlide1X_" # mx);
    defvar ReadVISlideV_MX = !cast<SchedRead>("ReadVISlideV_" # mx);
    defvar ReadVISlideX_MX = !cast<SchedRead>("ReadVISlideX_" # mx);

    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
                 Sched<[WriteVISlide1X_MX, ReadVISlideV_MX, ReadVISlideX_MX, ReadVMask]>;
  }
}
2131
// Vector-FP-scalar form: second source is the FP register class of f; the
// def suffix encodes the FP register width (f.FX).
multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, string Constraint = "", int sew = 0> {
  defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
                                   f.fprclass, m, Constraint, sew>;
}
2136
// Vector-FP-scalar form with an FRM rounding-mode operand (UsesVXRM=0).
multiclass VPseudoBinaryV_VF_RM<LMULInfo m, FPR_Info f, string Constraint = "", int sew = 0> {
  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass,
                                               f.fprclass, m, Constraint, sew,
                                               UsesVXRM=0>;
}
2142
// FP slide1-style pseudos: vector-FP-scalar binary ops over the FP type
// list and its legal LMULs, with VFSlide1F scheduling resources.
multiclass VPseudoVSLD1_VF<string Constraint = ""> {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFSlide1F_MX = !cast<SchedWrite>("WriteVFSlide1F_" # mx);
      defvar ReadVFSlideV_MX = !cast<SchedRead>("ReadVFSlideV_" # mx);
      defvar ReadVFSlideF_MX = !cast<SchedRead>("ReadVFSlideF_" # mx);

      defm "_V" # f.FX :
        VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
        Sched<[WriteVFSlide1F_MX, ReadVFSlideV_MX, ReadVFSlideF_MX, ReadVMask]>;
    }
  }
}
2157
// Vector-immediate form: second source is an immediate (simm5 by default).
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
2161
// Vector-immediate form with a rounding-mode operand (VXRM by default).
multiclass VPseudoBinaryV_VI_RM<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
2165
// Mask-register logical pseudos ("_MM_" defs): binary ops over the VR class
// with VMALUV scheduling resources, one per LMUL.
multiclass VPseudoVALU_MM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVMALUV_MX = !cast<SchedWrite>("WriteVMALUV_" # mx);
    defvar ReadVMALUV_MX = !cast<SchedRead>("ReadVMALUV_" # mx);

    let VLMul = m.value in {
      def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
                          Sched<[WriteVMALUV_MX, ReadVMALUV_MX, ReadVMALUV_MX]>;
    }
  }
}
2178
// We use earlyclobber here because destination/source overlap is only legal
// when:
// * The destination EEW is smaller than the source EEW and the overlap is
//   in the lowest-numbered part of the source register group.
// * The destination EEW is greater than the source EEW, the source EMUL is
//   at least 1, and the overlap is in the highest-numbered part of the
//   destination register group.
// Any other overlap is illegal.
// Widening vector-vector op: destination uses the double-width class
// (wvrclass); earlyclobber enforces the overlap rules described above.
multiclass VPseudoBinaryW_VV<LMULInfo m> {
  defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
                           "@earlyclobber $rd">;
}
2190
// Widening vector-vector op with FRM rounding mode (UsesVXRM=0).
multiclass VPseudoBinaryW_VV_RM<LMULInfo m> {
  defm _VV : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
                                      "@earlyclobber $rd", UsesVXRM=0>;
}
2195
// Widening vector-scalar (GPR) op; earlyclobber for the widened destination.
multiclass VPseudoBinaryW_VX<LMULInfo m> {
  defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
                             "@earlyclobber $rd">;
}
2200
// Widening vector-FP-scalar op; earlyclobber for the widened destination.
multiclass VPseudoBinaryW_VF<LMULInfo m, FPR_Info f> {
  defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
                                   f.fprclass, m,
                                   "@earlyclobber $rd">;
}
2206
// Widening vector-FP-scalar op with FRM rounding mode (UsesVXRM=0).
multiclass VPseudoBinaryW_VF_RM<LMULInfo m, FPR_Info f> {
  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass,
                                               f.fprclass, m,
                                               "@earlyclobber $rd",
                                               UsesVXRM=0>;
}
2213
// Widening op whose first source is already wide (W + V -> W); defines both
// the untied and tied ("_TIED") forms under earlyclobber.
multiclass VPseudoBinaryW_WV<LMULInfo m> {
  defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                           "@earlyclobber $rd">;
  defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
                               "@earlyclobber $rd">;
}
2220
// Rounding-mode (FRM) version of VPseudoBinaryW_WV: untied and tied forms.
multiclass VPseudoBinaryW_WV_RM<LMULInfo m> {
  defm _WV : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass, m.vrclass, m,
                                       "@earlyclobber $rd", UsesVXRM=0>;
  defm _WV : VPseudoTiedBinaryRoundingMode<m.wvrclass, m.vrclass, m,
                                           "@earlyclobber $rd">;
}
2227
// Wide-source vector-scalar op (W + X -> W); no overlap constraint needed
// since source and destination EEW match.
multiclass VPseudoBinaryW_WX<LMULInfo m> {
  defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
}
2231
// Wide-source vector-FP-scalar op (W + F -> W).
multiclass VPseudoBinaryW_WF<LMULInfo m, FPR_Info f> {
  defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
                                   f.fprclass, m>;
}
2236
// Wide-source vector-FP-scalar op with FRM rounding mode (UsesVXRM=0).
multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f> {
  defm "_W" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass,
                                               f.fprclass, m,
                                               UsesVXRM=0>;
}
2242
2243// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
2244// if the source and destination have an LMUL<=1. This matches this overlap
2245// exception from the spec.
2246// "The destination EEW is smaller than the source EEW and the overlap is in the
2247//  lowest-numbered part of the source register group."
// Narrowing op (W + V -> V); earlyclobber only when LMUL > 1 (octuple >= 8),
// per the comment above this multiclass.
multiclass VPseudoBinaryV_WV<LMULInfo m> {
  defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
2252
// Narrowing op with rounding mode; same conditional earlyclobber as above.
multiclass VPseudoBinaryV_WV_RM<LMULInfo m> {
  defm _WV : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, m.vrclass, m,
                                       !if(!ge(m.octuple, 8),
                                       "@earlyclobber $rd", "")>;
}
2258
// Narrowing vector-scalar op (W + X -> V) with conditional earlyclobber.
multiclass VPseudoBinaryV_WX<LMULInfo m> {
  defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
2263
// Narrowing vector-scalar op with rounding mode; conditional earlyclobber.
multiclass VPseudoBinaryV_WX_RM<LMULInfo m> {
  defm _WX : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, GPR, m,
                                       !if(!ge(m.octuple, 8),
                                       "@earlyclobber $rd", "")>;
}
2269
// Narrowing vector-immediate (uimm5) op with conditional earlyclobber.
multiclass VPseudoBinaryV_WI<LMULInfo m> {
  defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
2274
// Narrowing vector-immediate op with rounding mode; conditional earlyclobber.
multiclass VPseudoBinaryV_WI_RM<LMULInfo m> {
  defm _WI : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, uimm5, m,
                                       !if(!ge(m.octuple, 8),
                                       "@earlyclobber $rd", "")>;
}
2280
2281// For vadc and vsbc, the instruction encoding is reserved if the destination
2282// vector register is v0.
2283// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
// Carry-in vector-vector form ("VVM" when CarryIn).  Destination class:
// VR when the op writes a carry-out mask; otherwise excludes v0 when a
// carry-in mask is read (v0 holds the carry), else the plain LMUL class.
multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarryIn<!if(CarryOut, VR,
                         !if(!and(CarryIn, !not(CarryOut)),
                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                         m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}
2292
// Tied carry-in vector-vector form; destination excludes v0 (mask register).
multiclass VPseudoTiedBinaryV_VM<LMULInfo m> {
  def "_VVM" # "_" # m.MX:
    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, m.vrclass, m, 1, "">;
}
2298
// Carry-in vector-scalar (GPR) form; destination class selection mirrors
// VPseudoBinaryV_VM.
multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarryIn<!if(CarryOut, VR,
                         !if(!and(CarryIn, !not(CarryOut)),
                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                         m.vrclass, GPR, m, CarryIn, Constraint>;
}
2307
// Tied carry-in vector-scalar form; destination excludes v0 (mask register).
multiclass VPseudoTiedBinaryV_XM<LMULInfo m> {
  def "_VXM" # "_" # m.MX:
    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, GPR, m, 1, "">;
}
2313
// FP merge pseudos (VFMerge scheduling resources): tied carry-in ops taking
// an FP scalar, defined for every FP type and its legal LMULs.
multiclass VPseudoVMRG_FM {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMergeV_MX = !cast<SchedWrite>("WriteVFMergeV_" # mx);
      defvar ReadVFMergeV_MX = !cast<SchedRead>("ReadVFMergeV_" # mx);
      defvar ReadVFMergeF_MX = !cast<SchedRead>("ReadVFMergeF_" # mx);

      def "_V" # f.FX # "M_" # mx:
        VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                                 m.vrclass, f.fprclass, m, CarryIn=1, Constraint="">,
        Sched<[WriteVFMergeV_MX, ReadVFMergeV_MX, ReadVFMergeF_MX, ReadVMask]>;
    }
  }
}
2329
// Carry-in vector-immediate (simm5) form; destination class selection
// mirrors VPseudoBinaryV_VM.
multiclass VPseudoBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarryIn<!if(CarryOut, VR,
                         !if(!and(CarryIn, !not(CarryOut)),
                             GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                         m.vrclass, simm5, m, CarryIn, Constraint>;
}
2338
// Tied vector-immediate (simm5) carry-in pseudo ("_VIM_<MX>"); destination
// excludes V0, which carries the mask/carry operand.
multiclass VPseudoTiedBinaryV_IM<LMULInfo m> {
  def "_VIM_" # m.MX :
    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, simm5, m, CarryIn=1, Constraint="">;
}
2344
// Unmasked move pseudos per LMUL: vector ("_V_<MX>"), scalar GPR
// ("_X_<MX>") and immediate ("_I_<MX>") sources.
// Cleanup: the body previously nested two identical `let VLMul = m.value in`
// scopes; the redundant inner one is removed (same records are produced).
multiclass VPseudoUnaryVMV_V_X_I {
  foreach m = MxList in {
    let VLMul = m.value in {
      defvar mx = m.MX;
      // Per-LMUL scheduling resources for the three source kinds.
      defvar WriteVIMovV_MX = !cast<SchedWrite>("WriteVIMovV_" # mx);
      defvar WriteVIMovX_MX = !cast<SchedWrite>("WriteVIMovX_" # mx);
      defvar WriteVIMovI_MX = !cast<SchedWrite>("WriteVIMovI_" # mx);
      defvar ReadVIMovV_MX = !cast<SchedRead>("ReadVIMovV_" # mx);
      defvar ReadVIMovX_MX = !cast<SchedRead>("ReadVIMovX_" # mx);

      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                       Sched<[WriteVIMovV_MX, ReadVIMovV_MX]>;
      def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
                       Sched<[WriteVIMovX_MX, ReadVIMovX_MX]>;
      // Immediate source: no vector/scalar register read to model.
      def "_I_" # mx : VPseudoUnaryNoMask<m.vrclass, simm5>,
                       Sched<[WriteVIMovI_MX]>;
    }
  }
}
2366
// Unmasked FP-scalar move pseudos ("_<FX>_<MX>") for every scalar FP type
// and the LMULs it supports.
multiclass VPseudoVMV_F {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      // Per-LMUL scheduling resources.
      defvar WriteVFMovV_MX = !cast<SchedWrite>("WriteVFMovV_" # mx);
      defvar ReadVFMovF_MX = !cast<SchedRead>("ReadVFMovF_" # mx);

      let VLMul = m.value in {
        def "_" # f.FX # "_" # mx :
          VPseudoUnaryNoMask<m.vrclass, f.fprclass>,
          Sched<[WriteVFMovV_MX, ReadVFMovF_MX]>;
      }
    }
  }
}
2382
// FP-classify unary pseudos per FP LMUL: unmasked "_V_<MX>" and masked
// "_V_<MX>_MASK" (tagged RISCVMaskedPseudo; the mask is operand index 2).
multiclass VPseudoVCLS_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFClassV_MX = !cast<SchedWrite>("WriteVFClassV_" # mx);
    defvar ReadVFClassV_MX = !cast<SchedRead>("ReadVFClassV_" # mx);

    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                       Sched<[WriteVFClassV_MX, ReadVFClassV_MX, ReadVMask]>;
      def "_V_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                                 RISCVMaskedPseudo<MaskIdx=2>,
                                 Sched<[WriteVFClassV_MX, ReadVFClassV_MX, ReadVMask]>;
    }
  }
}
2398
// FP square-root unary pseudos with a rounding-mode operand. Scheduling is
// refined per SEW (suffix "_<MX>_E<SEW>"); both unmasked and masked
// ("_MASK", RISCVMaskedPseudo with mask at operand index 2) are emitted.
multiclass VPseudoVSQR_V_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    // SEWs valid for FP at this LMUL.
    defvar sews = SchedSEWSet<m.MX, isF=1>.val;

    let VLMul = m.value in
      foreach e = sews in {
        defvar suffix = "_" # mx # "_E" # e;
        defvar WriteVFSqrtV_MX_E = !cast<SchedWrite>("WriteVFSqrtV" # suffix);
        defvar ReadVFSqrtV_MX_E = !cast<SchedRead>("ReadVFSqrtV" # suffix);

        let SEW = e in {
          def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
                              Sched<[WriteVFSqrtV_MX_E, ReadVFSqrtV_MX_E,
                                     ReadVMask]>;
          def "_V" # suffix # "_MASK" : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
                                        RISCVMaskedPseudo<MaskIdx=2>,
                                        Sched<[WriteVFSqrtV_MX_E, ReadVFSqrtV_MX_E,
                                               ReadVMask]>;
        }
      }
  }
}
2422
// FP reciprocal-estimate style unary pseudos per FP LMUL; unmasked and
// masked ("_MASK") variants without a rounding-mode operand.
multiclass VPseudoVRCP_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFRecpV_MX = !cast<SchedWrite>("WriteVFRecpV_" # mx);
    defvar ReadVFRecpV_MX = !cast<SchedRead>("ReadVFRecpV_" # mx);

    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                         Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
      def "_V_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                                 RISCVMaskedPseudo<MaskIdx=2>,
                                 Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
    }
  }
}
2438
// Same as VPseudoVRCP_V but the pseudos carry a rounding-mode operand
// (VPseudoUnary*RoundingMode base classes).
multiclass VPseudoVRCP_V_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFRecpV_MX = !cast<SchedWrite>("WriteVFRecpV_" # mx);
    defvar ReadVFRecpV_MX = !cast<SchedRead>("ReadVFRecpV_" # mx);

    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
                         Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
      def "_V_" # mx # "_MASK" : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
                                 RISCVMaskedPseudo<MaskIdx=2>,
                                 Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
    }
  }
}
2454
// Integer extension pseudos widening from an EEW of SEW/2 (source class
// m.f2vrclass). Destination and source EEWs differ, so @earlyclobber keeps
// them from overlapping.
multiclass PseudoVEXT_VF2 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF2 in {
    defvar mx = m.MX;
    defvar WriteVExtV_MX = !cast<SchedWrite>("WriteVExtV_" # mx);
    defvar ReadVExtV_MX = !cast<SchedRead>("ReadVExtV_" # mx);

    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>,
                     Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMask<m.vrclass, m.f2vrclass, constraints>,
        RISCVMaskedPseudo<MaskIdx=2>,
        Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
    }
  }
}
2472
// Integer extension pseudos widening from an EEW of SEW/4 (source class
// m.f4vrclass); otherwise identical in structure to PseudoVEXT_VF2.
multiclass PseudoVEXT_VF4 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVExtV_MX = !cast<SchedWrite>("WriteVExtV_" # mx);
    defvar ReadVExtV_MX = !cast<SchedRead>("ReadVExtV_" # mx);

    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>,
                     Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMask<m.vrclass, m.f4vrclass, constraints>,
        RISCVMaskedPseudo<MaskIdx=2>,
        Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
    }
  }
}
2490
// Integer extension pseudos widening from an EEW of SEW/8 (source class
// m.f8vrclass); otherwise identical in structure to PseudoVEXT_VF2.
multiclass PseudoVEXT_VF8 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF8 in {
    defvar mx = m.MX;
    defvar WriteVExtV_MX = !cast<SchedWrite>("WriteVExtV_" # mx);
    defvar ReadVExtV_MX = !cast<SchedRead>("ReadVExtV_" # mx);

    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>,
                     Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMask<m.vrclass, m.f8vrclass, constraints>,
        RISCVMaskedPseudo<MaskIdx=2>,
        Sched<[WriteVExtV_MX, ReadVExtV_MX, ReadVMask]>;
    }
  }
}
2508
2509// The destination EEW is 1 since "For the purposes of register group overlap
2510// constraints, mask elements have EEW=1."
2511// The source EEW is 8, 16, 32, or 64.
2512// When the destination EEW is different from source EEW, we need to use
2513// @earlyclobber to avoid the overlap between destination and source registers.
2514// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
2515// exception from the spec
2516// "The destination EEW is smaller than the source EEW and the overlap is in the
2517//  lowest-numbered part of the source register group".
2518// With LMUL<=1 the source and dest occupy a single register so any overlap
2519// is in the lowest-numbered part.
// Mask-destination (EEW=1) vector-vector pseudo. Per the comment above,
// LMUL > 1 (octuple >= 16) requires @earlyclobber on the result.
multiclass VPseudoBinaryM_VV<LMULInfo m> {
  defvar Constraint = !if(!ge(m.octuple, 16), "@earlyclobber $rd", "");
  defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m, Constraint>;
}
2524
// Mask-destination vector-scalar (GPR) pseudo; @earlyclobber for LMUL > 1.
multiclass VPseudoBinaryM_VX<LMULInfo m> {
  defvar Constraint = !if(!ge(m.octuple, 16), "@earlyclobber $rd", "");
  defm "_VX" : VPseudoBinaryM<VR, m.vrclass, GPR, m, Constraint>;
}
2530
// Mask-destination vector-FP-scalar pseudo; @earlyclobber for LMUL > 1.
multiclass VPseudoBinaryM_VF<LMULInfo m, FPR_Info f> {
  defvar Constraint = !if(!ge(m.octuple, 16), "@earlyclobber $rd", "");
  defm "_V" # f.FX : VPseudoBinaryM<VR, m.vrclass, f.fprclass, m, Constraint>;
}
2536
// Mask-destination vector-immediate pseudo; @earlyclobber for LMUL > 1.
multiclass VPseudoBinaryM_VI<LMULInfo m> {
  defvar Constraint = !if(!ge(m.octuple, 16), "@earlyclobber $rd", "");
  defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m, Constraint>;
}
2541
// Register-gather pseudos: VX and VI forms are scheduled per LMUL only,
// while the VV form (data and index operands both vectors) is additionally
// refined per SEW.
multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVRGatherVX_MX = !cast<SchedWrite>("WriteVRGatherVX_" # mx);
    defvar WriteVRGatherVI_MX = !cast<SchedWrite>("WriteVRGatherVI_" # mx);
    defvar ReadVRGatherVX_data_MX = !cast<SchedRead>("ReadVRGatherVX_data_" # mx);
    defvar ReadVRGatherVX_index_MX = !cast<SchedRead>("ReadVRGatherVX_index_" # mx);
    defvar ReadVRGatherVI_data_MX = !cast<SchedRead>("ReadVRGatherVI_data_" # mx);

    defm "" : VPseudoBinaryV_VX<m, Constraint>,
              Sched<[WriteVRGatherVX_MX, ReadVRGatherVX_data_MX,
                     ReadVRGatherVX_index_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
              Sched<[WriteVRGatherVI_MX, ReadVRGatherVI_data_MX, ReadVMask]>;

    // VV form: one pseudo per (LMUL, SEW) pair.
    defvar sews = SchedSEWSet<mx>.val;
    foreach e = sews in {
      defvar WriteVRGatherVV_MX_E = !cast<SchedWrite>("WriteVRGatherVV_" # mx # "_E" # e);
      defvar ReadVRGatherVV_data_MX_E = !cast<SchedRead>("ReadVRGatherVV_data_" # mx # "_E" # e);
      defvar ReadVRGatherVV_index_MX_E = !cast<SchedRead>("ReadVRGatherVV_index_" # mx # "_E" # e);
      defm "" : VPseudoBinaryV_VV<m, Constraint, e>,
                Sched<[WriteVRGatherVV_MX_E, ReadVRGatherVV_data_MX_E,
                       ReadVRGatherVV_index_MX_E, ReadVMask]>;
    }
  }
}
2568
// Saturating-ALU pseudos in VV, VX, and VI forms, one set per LMUL.
multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSALUV_MX = !cast<SchedWrite>("WriteVSALUV_" # mx);
    defvar WriteVSALUX_MX = !cast<SchedWrite>("WriteVSALUX_" # mx);
    defvar WriteVSALUI_MX = !cast<SchedWrite>("WriteVSALUI_" # mx);
    defvar ReadVSALUV_MX = !cast<SchedRead>("ReadVSALUV_" # mx);
    defvar ReadVSALUX_MX = !cast<SchedRead>("ReadVSALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
              Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
              Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
              Sched<[WriteVSALUI_MX, ReadVSALUV_MX, ReadVMask]>;
  }
}
2586
2587
// Shift pseudos in VV, VX, and VI forms, one set per LMUL.
multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVShiftV_MX = !cast<SchedWrite>("WriteVShiftV_" # mx);
    defvar WriteVShiftX_MX = !cast<SchedWrite>("WriteVShiftX_" # mx);
    defvar WriteVShiftI_MX = !cast<SchedWrite>("WriteVShiftI_" # mx);
    defvar ReadVShiftV_MX = !cast<SchedRead>("ReadVShiftV_" # mx);
    defvar ReadVShiftX_MX = !cast<SchedRead>("ReadVShiftX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
              Sched<[WriteVShiftV_MX, ReadVShiftV_MX, ReadVShiftV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
              Sched<[WriteVShiftX_MX, ReadVShiftV_MX, ReadVShiftX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
              Sched<[WriteVShiftI_MX, ReadVShiftV_MX, ReadVMask]>;
  }
}
2605
// Scaling-shift pseudos in VV, VX, and VI forms; uses the _RM binary pseudo
// bases, which carry a rounding-mode operand.
multiclass VPseudoVSSHT_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSShiftV_MX = !cast<SchedWrite>("WriteVSShiftV_" # mx);
    defvar WriteVSShiftX_MX = !cast<SchedWrite>("WriteVSShiftX_" # mx);
    defvar WriteVSShiftI_MX = !cast<SchedWrite>("WriteVSShiftI_" # mx);
    defvar ReadVSShiftV_MX = !cast<SchedRead>("ReadVSShiftV_" # mx);
    defvar ReadVSShiftX_MX = !cast<SchedRead>("ReadVSShiftX_" # mx);

    defm "" : VPseudoBinaryV_VV_RM<m, Constraint>,
              Sched<[WriteVSShiftV_MX, ReadVSShiftV_MX, ReadVSShiftV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX_RM<m, Constraint>,
              Sched<[WriteVSShiftX_MX, ReadVSShiftV_MX, ReadVSShiftX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI_RM<ImmType, m, Constraint>,
              Sched<[WriteVSShiftI_MX, ReadVSShiftV_MX, ReadVMask]>;
  }
}
2623
// Integer ALU pseudos in VV, VX, and VI forms, one set per LMUL.
multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
    defvar WriteVIALUI_MX = !cast<SchedWrite>("WriteVIALUI_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m, Constraint>,
            Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m, Constraint>,
            Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
            Sched<[WriteVIALUI_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}
2641
// Saturating-ALU pseudos in VV and VX forms only (no immediate form).
multiclass VPseudoVSALU_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSALUV_MX = !cast<SchedWrite>("WriteVSALUV_" # mx);
    defvar WriteVSALUX_MX = !cast<SchedWrite>("WriteVSALUX_" # mx);
    defvar ReadVSALUV_MX = !cast<SchedRead>("ReadVSALUV_" # mx);
    defvar ReadVSALUX_MX = !cast<SchedRead>("ReadVSALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVSALUV_MX, ReadVSALUV_MX, ReadVSALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVSALUX_MX, ReadVSALUV_MX, ReadVSALUX_MX, ReadVMask]>;
  }
}
2656
// Saturating-multiply pseudos (VV/VX) using the rounding-mode binary bases.
multiclass VPseudoVSMUL_VV_VX_RM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVSMulV_MX = !cast<SchedWrite>("WriteVSMulV_" # mx);
    defvar WriteVSMulX_MX = !cast<SchedWrite>("WriteVSMulX_" # mx);
    defvar ReadVSMulV_MX = !cast<SchedRead>("ReadVSMulV_" # mx);
    defvar ReadVSMulX_MX = !cast<SchedRead>("ReadVSMulX_" # mx);

    defm "" : VPseudoBinaryV_VV_RM<m>,
              Sched<[WriteVSMulV_MX, ReadVSMulV_MX, ReadVSMulV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX_RM<m>,
              Sched<[WriteVSMulX_MX, ReadVSMulV_MX, ReadVSMulX_MX, ReadVMask]>;
  }
}
2671
// Averaging-ALU pseudos (VV/VX) using the rounding-mode binary bases.
multiclass VPseudoVAALU_VV_VX_RM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVAALUV_MX = !cast<SchedWrite>("WriteVAALUV_" # mx);
    defvar WriteVAALUX_MX = !cast<SchedWrite>("WriteVAALUX_" # mx);
    defvar ReadVAALUV_MX = !cast<SchedRead>("ReadVAALUV_" # mx);
    defvar ReadVAALUX_MX = !cast<SchedRead>("ReadVAALUX_" # mx);

    defm "" : VPseudoBinaryV_VV_RM<m>,
              Sched<[WriteVAALUV_MX, ReadVAALUV_MX, ReadVAALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX_RM<m>,
              Sched<[WriteVAALUX_MX, ReadVAALUV_MX, ReadVAALUX_MX, ReadVMask]>;
  }
}
2686
// Integer min/max pseudos in VV and VX forms, one set per LMUL.
multiclass VPseudoVMINMAX_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIMinMaxV_MX = !cast<SchedWrite>("WriteVIMinMaxV_" # mx);
    defvar WriteVIMinMaxX_MX = !cast<SchedWrite>("WriteVIMinMaxX_" # mx);
    defvar ReadVIMinMaxV_MX = !cast<SchedRead>("ReadVIMinMaxV_" # mx);
    defvar ReadVIMinMaxX_MX = !cast<SchedRead>("ReadVIMinMaxX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVIMinMaxV_MX, ReadVIMinMaxV_MX, ReadVIMinMaxV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVIMinMaxX_MX, ReadVIMinMaxV_MX, ReadVIMinMaxX_MX, ReadVMask]>;
  }
}
2701
// Integer multiply pseudos in VV and VX forms, one set per LMUL.
multiclass VPseudoVMUL_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIMulV_MX = !cast<SchedWrite>("WriteVIMulV_" # mx);
    defvar WriteVIMulX_MX = !cast<SchedWrite>("WriteVIMulX_" # mx);
    defvar ReadVIMulV_MX = !cast<SchedRead>("ReadVIMulV_" # mx);
    defvar ReadVIMulX_MX = !cast<SchedRead>("ReadVIMulX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVIMulV_MX, ReadVIMulV_MX, ReadVIMulV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVIMulX_MX, ReadVIMulV_MX, ReadVIMulX_MX, ReadVMask]>;
  }
}
2716
// Integer divide pseudos (VV/VX); scheduling is refined per (LMUL, SEW)
// since divide latency typically depends on element width.
multiclass VPseudoVDIV_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar sews = SchedSEWSet<mx>.val;
    foreach e = sews in {
      defvar WriteVIDivV_MX_E = !cast<SchedWrite>("WriteVIDivV_" # mx # "_E" # e);
      defvar WriteVIDivX_MX_E = !cast<SchedWrite>("WriteVIDivX_" # mx # "_E" # e);
      defvar ReadVIDivV_MX_E = !cast<SchedRead>("ReadVIDivV_" # mx # "_E" # e);
      defvar ReadVIDivX_MX_E = !cast<SchedRead>("ReadVIDivX_" # mx # "_E" # e);

      defm "" : VPseudoBinaryV_VV<m, "", e>,
                Sched<[WriteVIDivV_MX_E, ReadVIDivV_MX_E, ReadVIDivV_MX_E, ReadVMask]>;
      defm "" : VPseudoBinaryV_VX<m, "", e>,
                Sched<[WriteVIDivX_MX_E, ReadVIDivV_MX_E, ReadVIDivX_MX_E, ReadVMask]>;
    }
  }
}
2734
// FP multiply pseudos with rounding-mode operand: VV form over the FP LMUL
// list, VF form for each scalar FP type over the LMULs it supports.
multiclass VPseudoVFMUL_VV_VF_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFMulV_MX = !cast<SchedWrite>("WriteVFMulV_" # mx);
    defvar ReadVFMulV_MX = !cast<SchedRead>("ReadVFMulV_" # mx);

    defm "" : VPseudoBinaryFV_VV_RM<m>,
              Sched<[WriteVFMulV_MX, ReadVFMulV_MX, ReadVFMulV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMulF_MX = !cast<SchedWrite>("WriteVFMulF_" # mx);
      defvar ReadVFMulV_MX = !cast<SchedRead>("ReadVFMulV_" # mx);
      defvar ReadVFMulF_MX = !cast<SchedRead>("ReadVFMulF_" # mx);

      defm "" : VPseudoBinaryV_VF_RM<m, f>,
                Sched<[WriteVFMulF_MX, ReadVFMulV_MX, ReadVFMulF_MX, ReadVMask]>;
    }
  }
}
2757
// FP divide pseudos with rounding-mode operand. The VV form iterates the
// per-LMUL FP SEW set; the VF form uses each FP type's fixed SEW (f.SEW).
multiclass VPseudoVFDIV_VV_VF_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar sews = SchedSEWSet<mx, isF=1>.val;
    foreach e = sews in {
      defvar WriteVFDivV_MX_E = !cast<SchedWrite>("WriteVFDivV_" # mx # "_E" # e);
      defvar ReadVFDivV_MX_E = !cast<SchedRead>("ReadVFDivV_" # mx # "_E" # e);

      defm "" : VPseudoBinaryFV_VV_RM<m, "", e>,
                Sched<[WriteVFDivV_MX_E, ReadVFDivV_MX_E, ReadVFDivV_MX_E, ReadVMask]>;
    }
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFDivF_MX_E = !cast<SchedWrite>("WriteVFDivF_" # mx # "_E" # f.SEW);
      defvar ReadVFDivV_MX_E = !cast<SchedRead>("ReadVFDivV_" # mx # "_E" # f.SEW);
      defvar ReadVFDivF_MX_E = !cast<SchedRead>("ReadVFDivF_" # mx # "_E" # f.SEW);

      defm "" : VPseudoBinaryV_VF_RM<m, f, "", f.SEW>,
                Sched<[WriteVFDivF_MX_E, ReadVFDivV_MX_E, ReadVFDivF_MX_E, ReadVMask]>;
    }
  }
}
2783
// Reverse FP divide: VF form only (scalar dividend), per-f.SEW scheduling,
// rounding-mode operand carried by the _RM base class.
multiclass VPseudoVFRDIV_VF_RM {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFDivF_MX_E = !cast<SchedWrite>("WriteVFDivF_" # mx # "_E" # f.SEW);
      defvar ReadVFDivV_MX_E = !cast<SchedRead>("ReadVFDivV_" # mx # "_E" # f.SEW);
      defvar ReadVFDivF_MX_E = !cast<SchedRead>("ReadVFDivF_" # mx # "_E" # f.SEW);

      defm "" : VPseudoBinaryV_VF_RM<m, f, "", f.SEW>,
                Sched<[WriteVFDivF_MX_E, ReadVFDivV_MX_E, ReadVFDivF_MX_E, ReadVMask]>;
    }
  }
}
2797
// Integer ALU pseudos in VV and VX forms only (no immediate form).
// Bug fix: WriteVIALUX_MX was previously cast from the string
// "WriteVIALUV_" # mx, so the VX form was scheduled against the
// vector-operand write resource instead of the scalar-operand one
// (compare VPseudoVALU_VV_VX_VI above).
multiclass VPseudoVALU_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
            Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
            Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
  }
}
2812
// FP sign-injection pseudos: VV form over the FP LMUL list, VF form per
// scalar FP type.
multiclass VPseudoVSGNJ_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFSgnjV_MX = !cast<SchedWrite>("WriteVFSgnjV_" # mx);
    defvar ReadVFSgnjV_MX = !cast<SchedRead>("ReadVFSgnjV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFSgnjV_MX, ReadVFSgnjV_MX, ReadVFSgnjV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFSgnjF_MX = !cast<SchedWrite>("WriteVFSgnjF_" # mx);
      defvar ReadVFSgnjV_MX = !cast<SchedRead>("ReadVFSgnjV_" # mx);
      defvar ReadVFSgnjF_MX = !cast<SchedRead>("ReadVFSgnjF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFSgnjF_MX, ReadVFSgnjV_MX, ReadVFSgnjF_MX, ReadVMask]>;
    }
  }
}
2835
// FP min/max pseudos: VV form over the FP LMUL list, VF form per scalar
// FP type.
multiclass VPseudoVMAX_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFMinMaxV_MX = !cast<SchedWrite>("WriteVFMinMaxV_" # mx);
    defvar ReadVFMinMaxV_MX = !cast<SchedRead>("ReadVFMinMaxV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFMinMaxV_MX, ReadVFMinMaxV_MX, ReadVFMinMaxV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMinMaxF_MX = !cast<SchedWrite>("WriteVFMinMaxF_" # mx);
      defvar ReadVFMinMaxV_MX = !cast<SchedRead>("ReadVFMinMaxV_" # mx);
      defvar ReadVFMinMaxF_MX = !cast<SchedRead>("ReadVFMinMaxF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFMinMaxF_MX, ReadVFMinMaxV_MX, ReadVFMinMaxF_MX, ReadVMask]>;
    }
  }
}
2858
// FP ALU pseudos (no rounding-mode operand): VV over the FP LMUL list,
// VF per scalar FP type.
multiclass VPseudoVALU_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFALUV_MX = !cast<SchedWrite>("WriteVFALUV_" # mx);
    defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);

    defm "" : VPseudoBinaryFV_VV<m>,
              Sched<[WriteVFALUV_MX, ReadVFALUV_MX, ReadVFALUV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFALUF_MX = !cast<SchedWrite>("WriteVFALUF_" # mx);
      defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
      defvar ReadVFALUF_MX = !cast<SchedRead>("ReadVFALUF_" # mx);
      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>;
    }
  }
}
2880
// FP ALU pseudos with rounding-mode operand: VV over the FP LMUL list,
// VF per scalar FP type.
multiclass VPseudoVALU_VV_VF_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFALUV_MX = !cast<SchedWrite>("WriteVFALUV_" # mx);
    defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);

    defm "" : VPseudoBinaryFV_VV_RM<m>,
              Sched<[WriteVFALUV_MX, ReadVFALUV_MX, ReadVFALUV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFALUF_MX = !cast<SchedWrite>("WriteVFALUF_" # mx);
      defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
      defvar ReadVFALUF_MX = !cast<SchedRead>("ReadVFALUF_" # mx);
      defm "" : VPseudoBinaryV_VF_RM<m, f>,
                Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>;
    }
  }
}
2902
// FP ALU pseudos, VF form only (scalar FP operand), no rounding-mode.
multiclass VPseudoVALU_VF {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFALUF_MX = !cast<SchedWrite>("WriteVFALUF_" # mx);
      defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
      defvar ReadVFALUF_MX = !cast<SchedRead>("ReadVFALUF_" # mx);

      defm "" : VPseudoBinaryV_VF<m, f>,
                Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>;
    }
  }
}
2916
// FP ALU pseudos, VF form only, with a rounding-mode operand.
multiclass VPseudoVALU_VF_RM {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFALUF_MX = !cast<SchedWrite>("WriteVFALUF_" # mx);
      defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
      defvar ReadVFALUF_MX = !cast<SchedRead>("ReadVFALUF_" # mx);

      defm "" : VPseudoBinaryV_VF_RM<m, f>,
                Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>;
    }
  }
}
2930
// Integer ALU pseudos in VX and VI forms only (no vector-vector form).
multiclass VPseudoVALU_VX_VI<Operand ImmType = simm5> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
    defvar WriteVIALUI_MX = !cast<SchedWrite>("WriteVIALUI_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);

    defm "" : VPseudoBinaryV_VX<m>,
            Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VI<ImmType, m>,
            Sched<[WriteVIALUI_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}
2945
// Widening integer ALU pseudos (2*SEW result) in VV and VX forms; iterates
// MxListW, the LMULs valid for widening ops.
multiclass VPseudoVWALU_VV_VX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWALUV_MX = !cast<SchedWrite>("WriteVIWALUV_" # mx);
    defvar WriteVIWALUX_MX = !cast<SchedWrite>("WriteVIWALUX_" # mx);
    defvar ReadVIWALUV_MX = !cast<SchedRead>("ReadVIWALUV_" # mx);
    defvar ReadVIWALUX_MX = !cast<SchedRead>("ReadVIWALUX_" # mx);

    defm "" : VPseudoBinaryW_VV<m>,
            Sched<[WriteVIWALUV_MX, ReadVIWALUV_MX, ReadVIWALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryW_VX<m>,
            Sched<[WriteVIWALUX_MX, ReadVIWALUV_MX, ReadVIWALUX_MX, ReadVMask]>;
  }
}
2960
// Widening integer multiply pseudos in VV and VX forms over MxListW.
multiclass VPseudoVWMUL_VV_VX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWMulV_MX = !cast<SchedWrite>("WriteVIWMulV_" # mx);
    defvar WriteVIWMulX_MX = !cast<SchedWrite>("WriteVIWMulX_" # mx);
    defvar ReadVIWMulV_MX = !cast<SchedRead>("ReadVIWMulV_" # mx);
    defvar ReadVIWMulX_MX = !cast<SchedRead>("ReadVIWMulX_" # mx);

    defm "" : VPseudoBinaryW_VV<m>,
              Sched<[WriteVIWMulV_MX, ReadVIWMulV_MX, ReadVIWMulV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryW_VX<m>,
              Sched<[WriteVIWMulX_MX, ReadVIWMulV_MX, ReadVIWMulX_MX, ReadVMask]>;
  }
}
2975
// Widening FP multiply pseudos with rounding-mode operand: VV over
// MxListFW, VF per widening-capable scalar FP type (FPListW).
multiclass VPseudoVWMUL_VV_VF_RM {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWMulV_MX = !cast<SchedWrite>("WriteVFWMulV_" # mx);
    defvar ReadVFWMulV_MX = !cast<SchedRead>("ReadVFWMulV_" # mx);

    defm "" : VPseudoBinaryW_VV_RM<m>,
              Sched<[WriteVFWMulV_MX, ReadVFWMulV_MX, ReadVFWMulV_MX, ReadVMask]>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defvar WriteVFWMulF_MX = !cast<SchedWrite>("WriteVFWMulF_" # mx);
      defvar ReadVFWMulV_MX = !cast<SchedRead>("ReadVFWMulV_" # mx);
      defvar ReadVFWMulF_MX = !cast<SchedRead>("ReadVFWMulF_" # mx);

      defm "" : VPseudoBinaryW_VF_RM<m, f>,
                Sched<[WriteVFWMulF_MX, ReadVFWMulV_MX, ReadVFWMulF_MX, ReadVMask]>;
    }
  }
}
2998
// Widening integer ALU pseudos whose first source is already the wide
// (2*SEW) type: WV and WX forms over MxListW.
multiclass VPseudoVWALU_WV_WX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWALUV_MX = !cast<SchedWrite>("WriteVIWALUV_" # mx);
    defvar WriteVIWALUX_MX = !cast<SchedWrite>("WriteVIWALUX_" # mx);
    defvar ReadVIWALUV_MX = !cast<SchedRead>("ReadVIWALUV_" # mx);
    defvar ReadVIWALUX_MX = !cast<SchedRead>("ReadVIWALUX_" # mx);

    defm "" : VPseudoBinaryW_WV<m>,
              Sched<[WriteVIWALUV_MX, ReadVIWALUV_MX, ReadVIWALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryW_WX<m>,
              Sched<[WriteVIWALUX_MX, ReadVIWALUV_MX, ReadVIWALUX_MX, ReadVMask]>;
  }
}
3013
// Widening FP ALU pseudos with rounding-mode operand: VV over MxListFW,
// VF per widening-capable scalar FP type (FPListW).
multiclass VPseudoVFWALU_VV_VF_RM {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWALUV_MX = !cast<SchedWrite>("WriteVFWALUV_" # mx);
    defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);

    defm "" : VPseudoBinaryW_VV_RM<m>,
              Sched<[WriteVFWALUV_MX, ReadVFWALUV_MX, ReadVFWALUV_MX, ReadVMask]>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defvar WriteVFWALUF_MX = !cast<SchedWrite>("WriteVFWALUF_" # mx);
      defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);
      defvar ReadVFWALUF_MX = !cast<SchedRead>("ReadVFWALUF_" # mx);

      defm "" : VPseudoBinaryW_VF_RM<m, f>,
                Sched<[WriteVFWALUF_MX, ReadVFWALUV_MX, ReadVFWALUF_MX, ReadVMask]>;
    }
  }
}
3036
// Widening FP ALU pseudos whose first source is already wide: WV and WF
// forms with a rounding-mode operand.
multiclass VPseudoVFWALU_WV_WF_RM {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWALUV_MX = !cast<SchedWrite>("WriteVFWALUV_" # mx);
    defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);

    defm "" : VPseudoBinaryW_WV_RM<m>,
              Sched<[WriteVFWALUV_MX, ReadVFWALUV_MX, ReadVFWALUV_MX, ReadVMask]>;
  }
  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defvar WriteVFWALUF_MX = !cast<SchedWrite>("WriteVFWALUF_" # mx);
      defvar ReadVFWALUV_MX = !cast<SchedRead>("ReadVFWALUV_" # mx);
      defvar ReadVFWALUF_MX = !cast<SchedRead>("ReadVFWALUF_" # mx);

      defm "" : VPseudoBinaryW_WF_RM<m, f>,
                Sched<[WriteVFWALUF_MX, ReadVFWALUV_MX, ReadVFWALUF_MX, ReadVMask]>;
    }
  }
}
3058
// Integer merge pseudos ("_VVM_", "_VXM_", "_VIM_" per LMUL): tied
// carry-in pseudos whose destination class excludes V0 (mask operand),
// with the mask read modeled via ReadVMask.
multiclass VPseudoVMRG_VM_XM_IM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIMergeV_MX = !cast<SchedWrite>("WriteVIMergeV_" # mx);
    defvar WriteVIMergeX_MX = !cast<SchedWrite>("WriteVIMergeX_" # mx);
    defvar WriteVIMergeI_MX = !cast<SchedWrite>("WriteVIMergeI_" # mx);
    defvar ReadVIMergeV_MX = !cast<SchedRead>("ReadVIMergeV_" # mx);
    defvar ReadVIMergeX_MX = !cast<SchedRead>("ReadVIMergeX_" # mx);

    def "_VVM" # "_" # m.MX:
      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                               m.vrclass, m.vrclass, m, 1, "">,
      Sched<[WriteVIMergeV_MX, ReadVIMergeV_MX, ReadVIMergeV_MX, ReadVMask]>;
    def "_VXM" # "_" # m.MX:
      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                               m.vrclass, GPR, m, 1, "">,
      Sched<[WriteVIMergeX_MX, ReadVIMergeV_MX, ReadVIMergeX_MX, ReadVMask]>;
    def "_VIM" # "_" # m.MX:
      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                               m.vrclass, simm5, m, 1, "">,
      Sched<[WriteVIMergeI_MX, ReadVIMergeV_MX, ReadVMask]>;
  }
}
3082
// Carry-in ALU pseudos in VM, XM, and IM forms, built from the tied
// carry-in multiclasses above, one set per LMUL.
multiclass VPseudoVCALU_VM_XM_IM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar WriteVICALUI_MX = !cast<SchedWrite>("WriteVICALUI_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoTiedBinaryV_VM<m>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_XM<m>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_IM<m>,
              Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>;
  }
}
3100
// Same as VPseudoVCALU_VM_XM_IM but without the immediate (.vim) form --
// for carry-using ops that have no immediate encoding (e.g. vsbc).
multiclass VPseudoVCALU_VM_XM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoTiedBinaryV_VM<m>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoTiedBinaryV_XM<m>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
  }
}
3115
// Mask-producing carry ALU pseudos (CarryOut=1, CarryIn=1), i.e. ops that
// both consume a carry and write a mask result (e.g. vmadc.vvm/vxm/vim).
// Constraint is forwarded to the underlying binary pseudos.
multiclass VPseudoVCALUM_VM_XM_IM<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar WriteVICALUI_MX = !cast<SchedWrite>("WriteVICALUI_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=1, Constraint=Constraint>,
              Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>;
  }
}
3133
// As VPseudoVCALUM_VM_XM_IM but without the immediate form (e.g. vmsbc
// has no .vim encoding).
multiclass VPseudoVCALUM_VM_XM<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
  }
}
3148
// Mask-producing ALU pseudos with no carry-in (CarryOut=1, CarryIn=0).
// Note the Sched lists have no ReadVMask operand, matching the absent
// carry/mask input.
multiclass VPseudoVCALUM_V_X_I<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar WriteVICALUI_MX = !cast<SchedWrite>("WriteVICALUI_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX]>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX]>;
    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=0, Constraint=Constraint>,
              Sched<[WriteVICALUI_MX, ReadVICALUV_MX]>;
  }
}
3166
// As VPseudoVCALUM_V_X_I but without the immediate form.
multiclass VPseudoVCALUM_V_X<string Constraint> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICALUV_MX = !cast<SchedWrite>("WriteVICALUV_" # mx);
    defvar WriteVICALUX_MX = !cast<SchedWrite>("WriteVICALUX_" # mx);
    defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
    defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);

    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=Constraint>,
              Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX]>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=Constraint>,
              Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX]>;
  }
}
3181
// Narrowing clip pseudos (wide source, narrow destination) carrying a
// rounding-mode operand (_RM), in .wv/.wx/.wi forms over MxListW.
multiclass VPseudoVNCLP_WV_WX_WI_RM {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVNClipV_MX = !cast<SchedWrite>("WriteVNClipV_" # mx);
    defvar WriteVNClipX_MX = !cast<SchedWrite>("WriteVNClipX_" # mx);
    defvar WriteVNClipI_MX = !cast<SchedWrite>("WriteVNClipI_" # mx);
    defvar ReadVNClipV_MX = !cast<SchedRead>("ReadVNClipV_" # mx);
    defvar ReadVNClipX_MX = !cast<SchedRead>("ReadVNClipX_" # mx);

    defm "" : VPseudoBinaryV_WV_RM<m>,
              Sched<[WriteVNClipV_MX, ReadVNClipV_MX, ReadVNClipV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WX_RM<m>,
              Sched<[WriteVNClipX_MX, ReadVNClipV_MX, ReadVNClipX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WI_RM<m>,
              Sched<[WriteVNClipI_MX, ReadVNClipV_MX, ReadVMask]>;
  }
}
3199
// Narrowing shift pseudos in .wv/.wx/.wi forms over MxListW; no rounding
// mode operand, unlike the clip variant above.
multiclass VPseudoVNSHT_WV_WX_WI {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVNShiftV_MX = !cast<SchedWrite>("WriteVNShiftV_" # mx);
    defvar WriteVNShiftX_MX = !cast<SchedWrite>("WriteVNShiftX_" # mx);
    defvar WriteVNShiftI_MX = !cast<SchedWrite>("WriteVNShiftI_" # mx);
    defvar ReadVNShiftV_MX = !cast<SchedRead>("ReadVNShiftV_" # mx);
    defvar ReadVNShiftX_MX = !cast<SchedRead>("ReadVNShiftX_" # mx);

    defm "" : VPseudoBinaryV_WV<m>,
              Sched<[WriteVNShiftV_MX, ReadVNShiftV_MX, ReadVNShiftV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WX<m>,
              Sched<[WriteVNShiftX_MX, ReadVNShiftV_MX, ReadVNShiftX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_WI<m>,
              Sched<[WriteVNShiftI_MX, ReadVNShiftV_MX, ReadVMask]>;
  }
}
3217
// Ternary pseudo pair (unmasked + _MASK) whose names encode both LMUL and
// SEW ("_MX_Esew"); used by reductions below, where scheduling depends on
// SEW as well as LMUL.
multiclass VPseudoTernaryWithTailPolicy<VReg RetClass,
                                          RegisterClass Op1Class,
                                          DAGOperand Op2Class,
                                          LMULInfo MInfo,
                                          int sew,
                                          string Constraint = "",
                                          bit Commutable = 0> {
  let VLMul = MInfo.value in {
    defvar mx = MInfo.MX;
    // Commutable applies only to the unmasked variant.
    let isCommutable = Commutable in
    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>;
  }
}
3232
// Rounding-mode flavor of VPseudoTernaryWithTailPolicy: same "_MX_Esew"
// naming, but the underlying pseudos carry a rounding-mode operand.
multiclass VPseudoTernaryWithTailPolicyRoundingMode<VReg RetClass,
                                          RegisterClass Op1Class,
                                          DAGOperand Op2Class,
                                          LMULInfo MInfo,
                                          int sew,
                                          string Constraint = "",
                                          bit Commutable = 0> {
  let VLMul = MInfo.value in {
    defvar mx = MInfo.MX;
    let isCommutable = Commutable in
    def "_" # mx # "_E" # sew
        : VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
                                                     Op2Class, Constraint>;
    def "_" # mx # "_E" # sew # "_MASK"
        : VPseudoTernaryMaskPolicyRoundingMode<RetClass, Op1Class,
                                               Op2Class, Constraint>;
  }
}
3251
// Ternary pseudo pair (unmasked + _MASK) keyed by LMUL only.  The masked
// variant is tagged RISCVMaskedPseudo<MaskIdx=3> so the mask operand can be
// located when converting masked -> unmasked forms.
multiclass VPseudoTernaryWithPolicy<VReg RetClass,
                                    RegisterClass Op1Class,
                                    DAGOperand Op2Class,
                                    LMULInfo MInfo,
                                    string Constraint = "",
                                    bit Commutable = 0> {
  let VLMul = MInfo.value in {
    let isCommutable = Commutable in
    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}
3265
// Rounding-mode flavor of VPseudoTernaryWithPolicy.  The masked variant is
// built with UsesVXRM_=0, i.e. its rounding-mode operand is not vxrm --
// presumably FRM for the FP users of this multiclass; confirm at the
// definition of VPseudoBinaryMaskPolicyRoundingMode.
multiclass VPseudoTernaryWithPolicyRoundingMode<VReg RetClass,
                                                RegisterClass Op1Class,
                                                DAGOperand Op2Class,
                                                LMULInfo MInfo,
                                                string Constraint = "",
                                                bit Commutable = 0> {
  let VLMul = MInfo.value in {
    let isCommutable = Commutable in
    def "_" # MInfo.MX :
        VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
                                                   Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" :
        VPseudoBinaryMaskPolicyRoundingMode<RetClass, Op1Class,
                                            Op2Class, Constraint,
                                            UsesVXRM_=0>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}
3284
// Commutable vector-vector ternary (multiply-add style) at a single LMUL.
multiclass VPseudoTernaryV_VV_AAXA<LMULInfo m, string Constraint = ""> {
  defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
                                      Constraint, Commutable=1>;
}
3289
// Rounding-mode flavor of VPseudoTernaryV_VV_AAXA.
multiclass VPseudoTernaryV_VV_AAXA_RM<LMULInfo m, string Constraint = ""> {
  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
                                                  Constraint, Commutable=1>;
}
3294
// Commutable ternary with a GPR scalar first source operand (_VX).
multiclass VPseudoTernaryV_VX_AAXA<LMULInfo m, string Constraint = ""> {
  defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
                                        Constraint, Commutable=1>;
}
3299
// Commutable ternary with an FP scalar source; suffix encodes the FP
// register width (f.FX).
multiclass VPseudoTernaryV_VF_AAXA<LMULInfo m, FPR_Info f, string Constraint = ""> {
  defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
                                              m.vrclass, m, Constraint,
                                              Commutable=1>;
}
3305
// Rounding-mode flavor of VPseudoTernaryV_VF_AAXA.
multiclass VPseudoTernaryV_VF_AAXA_RM<LMULInfo m, FPR_Info f, string Constraint = ""> {
  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, f.fprclass,
                                                          m.vrclass, m, Constraint,
                                                          Commutable=1>;
}
3311
// Widening vector-vector ternary: destination uses the wide register group
// (m.wvrclass), so the result is early-clobber vs. the narrow sources.
multiclass VPseudoTernaryW_VV<LMULInfo m> {
  defvar constraint = "@earlyclobber $rd";
  defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
                                      constraint>;
}
3317
// Rounding-mode flavor of VPseudoTernaryW_VV.
multiclass VPseudoTernaryW_VV_RM<LMULInfo m> {
  defvar constraint = "@earlyclobber $rd";
  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
                                                  constraint>;
}
3323
// Widening ternary with a GPR scalar source; wide early-clobber result.
multiclass VPseudoTernaryW_VX<LMULInfo m> {
  defvar constraint = "@earlyclobber $rd";
  defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
                                        constraint>;
}
3329
// Widening ternary with an FP scalar source; wide early-clobber result.
multiclass VPseudoTernaryW_VF<LMULInfo m, FPR_Info f> {
  defvar constraint = "@earlyclobber $rd";
  defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
                                              m.vrclass, m, constraint>;
}
3335
// Rounding-mode flavor of VPseudoTernaryW_VF.
multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f> {
  defvar constraint = "@earlyclobber $rd";
  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, f.fprclass,
                                                          m.vrclass, m, constraint>;
}
3341
// Slide pseudo pair (unmasked + _MASK); like VPseudoTernaryWithPolicy but
// never commutable, as slides are order-sensitive.
multiclass VPseudoVSLDVWithPolicy<VReg RetClass,
                                  RegisterClass Op1Class,
                                  DAGOperand Op2Class,
                                  LMULInfo MInfo,
                                  string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}
3353
// Slide pseudo taking a GPR offset operand.
multiclass VPseudoVSLDV_VX<LMULInfo m, string Constraint = ""> {
  defm _VX : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
3357
// Slide pseudo taking an immediate offset operand (simm5 by default).
multiclass VPseudoVSLDV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
3361
// Integer multiply-add pseudos in .vv and .vx forms for every LMUL, with
// four-operand Sched lists (dest + three sources + mask).
multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIMulAddV_MX = !cast<SchedWrite>("WriteVIMulAddV_" # mx);
    defvar WriteVIMulAddX_MX = !cast<SchedWrite>("WriteVIMulAddX_" # mx);
    defvar ReadVIMulAddV_MX = !cast<SchedRead>("ReadVIMulAddV_" # mx);
    defvar ReadVIMulAddX_MX = !cast<SchedRead>("ReadVIMulAddX_" # mx);

    defm "" : VPseudoTernaryV_VV_AAXA<m, Constraint>,
              Sched<[WriteVIMulAddV_MX, ReadVIMulAddV_MX, ReadVIMulAddV_MX,
                     ReadVIMulAddV_MX, ReadVMask]>;
    defm "" : VPseudoTernaryV_VX_AAXA<m, Constraint>,
              Sched<[WriteVIMulAddX_MX, ReadVIMulAddV_MX, ReadVIMulAddV_MX,
                     ReadVIMulAddX_MX, ReadVMask]>;
  }
}
3378
// FP multiply-add pseudos: .vv forms over MxListF plus, for each scalar FP
// width in FPList, the ._Vf forms over that width's own LMUL list.
multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFMulAddV_MX = !cast<SchedWrite>("WriteVFMulAddV_" # mx);
    defvar ReadVFMulAddV_MX = !cast<SchedRead>("ReadVFMulAddV_" # mx);

    defm "" : VPseudoTernaryV_VV_AAXA<m, Constraint>,
              Sched<[WriteVFMulAddV_MX, ReadVFMulAddV_MX, ReadVFMulAddV_MX, ReadVFMulAddV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMulAddF_MX = !cast<SchedWrite>("WriteVFMulAddF_" # mx);
      defvar ReadVFMulAddV_MX = !cast<SchedRead>("ReadVFMulAddV_" # mx);
      defvar ReadVFMulAddF_MX = !cast<SchedRead>("ReadVFMulAddF_" # mx);

      defm "" : VPseudoTernaryV_VF_AAXA<m, f, Constraint>,
                Sched<[WriteVFMulAddF_MX, ReadVFMulAddV_MX, ReadVFMulAddV_MX, ReadVFMulAddF_MX, ReadVMask]>;
    }
  }
}
3401
// Rounding-mode flavor of VPseudoVMAC_VV_VF_AAXA (same structure, _RM
// underlying pseudos).
multiclass VPseudoVMAC_VV_VF_AAXA_RM<string Constraint = ""> {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFMulAddV_MX = !cast<SchedWrite>("WriteVFMulAddV_" # mx);
    defvar ReadVFMulAddV_MX = !cast<SchedRead>("ReadVFMulAddV_" # mx);

    defm "" : VPseudoTernaryV_VV_AAXA_RM<m, Constraint>,
              Sched<[WriteVFMulAddV_MX, ReadVFMulAddV_MX, ReadVFMulAddV_MX, ReadVFMulAddV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFMulAddF_MX = !cast<SchedWrite>("WriteVFMulAddF_" # mx);
      defvar ReadVFMulAddV_MX = !cast<SchedRead>("ReadVFMulAddV_" # mx);
      defvar ReadVFMulAddF_MX = !cast<SchedRead>("ReadVFMulAddF_" # mx);

      defm "" : VPseudoTernaryV_VF_AAXA_RM<m, f, Constraint>,
                Sched<[WriteVFMulAddF_MX, ReadVFMulAddV_MX, ReadVFMulAddV_MX, ReadVFMulAddF_MX, ReadVMask]>;
    }
  }
}
3424
// Slide pseudos in .vx and .vi forms for every LMUL; the immediate operand
// type defaults to simm5 but is overridable (e.g. for unsigned offsets).
multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVISlideX_MX = !cast<SchedWrite>("WriteVISlideX_" # mx);
    defvar WriteVISlideI_MX = !cast<SchedWrite>("WriteVISlideI_" # mx);
    defvar ReadVISlideV_MX = !cast<SchedRead>("ReadVISlideV_" # mx);
    defvar ReadVISlideX_MX = !cast<SchedRead>("ReadVISlideX_" # mx);

    defm "" : VPseudoVSLDV_VX<m, Constraint>,
              Sched<[WriteVISlideX_MX, ReadVISlideV_MX, ReadVISlideV_MX,
                     ReadVISlideX_MX, ReadVMask]>;
    defm "" : VPseudoVSLDV_VI<ImmType, m, Constraint>,
              Sched<[WriteVISlideI_MX, ReadVISlideV_MX, ReadVISlideV_MX, ReadVMask]>;
  }
}
3440
// Widening integer multiply-add pseudos in .vv and .vx forms over MxListW
// (wide destination comes from VPseudoTernaryW_*).
multiclass VPseudoVWMAC_VV_VX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWMulAddV_MX = !cast<SchedWrite>("WriteVIWMulAddV_" # mx);
    defvar WriteVIWMulAddX_MX = !cast<SchedWrite>("WriteVIWMulAddX_" # mx);
    defvar ReadVIWMulAddV_MX = !cast<SchedRead>("ReadVIWMulAddV_" # mx);
    defvar ReadVIWMulAddX_MX = !cast<SchedRead>("ReadVIWMulAddX_" # mx);

    defm "" : VPseudoTernaryW_VV<m>,
              Sched<[WriteVIWMulAddV_MX, ReadVIWMulAddV_MX, ReadVIWMulAddV_MX,
                     ReadVIWMulAddV_MX, ReadVMask]>;
    defm "" : VPseudoTernaryW_VX<m>,
              Sched<[WriteVIWMulAddX_MX, ReadVIWMulAddV_MX, ReadVIWMulAddV_MX,
                     ReadVIWMulAddX_MX, ReadVMask]>;
  }
}
3457
// Widening integer multiply-add pseudos with only the scalar (.vx) form,
// one per LMUL in MxListW (e.g. for ops without a .vv encoding).  Wide
// destination handled by VPseudoTernaryW_VX.
multiclass VPseudoVWMAC_VX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVIWMulAddX_MX = !cast<SchedWrite>("WriteVIWMulAddX_" # mx);
    // Normalized spacing around '=' to match every other defvar in this file.
    defvar ReadVIWMulAddV_MX = !cast<SchedRead>("ReadVIWMulAddV_" # mx);
    defvar ReadVIWMulAddX_MX = !cast<SchedRead>("ReadVIWMulAddX_" # mx);

    defm "" : VPseudoTernaryW_VX<m>,
              Sched<[WriteVIWMulAddX_MX, ReadVIWMulAddV_MX, ReadVIWMulAddV_MX,
                     ReadVIWMulAddX_MX, ReadVMask]>;
  }
}
3470
// Widening FP multiply-add pseudos with rounding mode: .vv over MxListFW,
// plus per-FP-width ._Vf forms over each width's MxListFW.
multiclass VPseudoVWMAC_VV_VF_RM {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWMulAddV_MX = !cast<SchedWrite>("WriteVFWMulAddV_" # mx);
    defvar ReadVFWMulAddV_MX = !cast<SchedRead>("ReadVFWMulAddV_" # mx);

    defm "" : VPseudoTernaryW_VV_RM<m>,
              Sched<[WriteVFWMulAddV_MX, ReadVFWMulAddV_MX,
                     ReadVFWMulAddV_MX, ReadVFWMulAddV_MX, ReadVMask]>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defvar WriteVFWMulAddF_MX = !cast<SchedWrite>("WriteVFWMulAddF_" # mx);
      defvar ReadVFWMulAddV_MX = !cast<SchedRead>("ReadVFWMulAddV_" # mx);
      defvar ReadVFWMulAddF_MX = !cast<SchedRead>("ReadVFWMulAddF_" # mx);

      defm "" : VPseudoTernaryW_VF_RM<m, f>,
                Sched<[WriteVFWMulAddF_MX, ReadVFWMulAddV_MX,
                       ReadVFWMulAddV_MX, ReadVFWMulAddF_MX, ReadVMask]>;
    }
  }
}
3495
// Mask-producing integer compare pseudos in .vv/.vx/.vi forms per LMUL.
multiclass VPseudoVCMPM_VV_VX_VI {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICmpV_MX = !cast<SchedWrite>("WriteVICmpV_" # mx);
    defvar WriteVICmpX_MX = !cast<SchedWrite>("WriteVICmpX_" # mx);
    defvar WriteVICmpI_MX = !cast<SchedWrite>("WriteVICmpI_" # mx);
    defvar ReadVICmpV_MX = !cast<SchedRead>("ReadVICmpV_" # mx);
    defvar ReadVICmpX_MX = !cast<SchedRead>("ReadVICmpX_" # mx);

    defm "" : VPseudoBinaryM_VV<m>,
              Sched<[WriteVICmpV_MX, ReadVICmpV_MX, ReadVICmpV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryM_VX<m>,
              Sched<[WriteVICmpX_MX, ReadVICmpV_MX, ReadVICmpX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryM_VI<m>,
              Sched<[WriteVICmpI_MX, ReadVICmpV_MX, ReadVMask]>;
  }
}
3513
// Integer compare pseudos in .vv/.vx forms only (no immediate encoding).
multiclass VPseudoVCMPM_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICmpV_MX = !cast<SchedWrite>("WriteVICmpV_" # mx);
    defvar WriteVICmpX_MX = !cast<SchedWrite>("WriteVICmpX_" # mx);
    defvar ReadVICmpV_MX = !cast<SchedRead>("ReadVICmpV_" # mx);
    defvar ReadVICmpX_MX = !cast<SchedRead>("ReadVICmpX_" # mx);

    defm "" : VPseudoBinaryM_VV<m>,
              Sched<[WriteVICmpV_MX, ReadVICmpV_MX, ReadVICmpV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryM_VX<m>,
              Sched<[WriteVICmpX_MX, ReadVICmpV_MX, ReadVICmpX_MX, ReadVMask]>;
  }
}
3528
// FP compare pseudos: .vv forms over MxListF plus per-FP-width ._Vf forms
// over each width's own MxList.
multiclass VPseudoVCMPM_VV_VF {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCmpV_MX = !cast<SchedWrite>("WriteVFCmpV_" # mx);
    defvar ReadVFCmpV_MX = !cast<SchedRead>("ReadVFCmpV_" # mx);

    defm "" : VPseudoBinaryM_VV<m>,
              Sched<[WriteVFCmpV_MX, ReadVFCmpV_MX, ReadVFCmpV_MX, ReadVMask]>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFCmpF_MX = !cast<SchedWrite>("WriteVFCmpF_" # mx);
      defvar ReadVFCmpV_MX = !cast<SchedRead>("ReadVFCmpV_" # mx);
      defvar ReadVFCmpF_MX = !cast<SchedRead>("ReadVFCmpF_" # mx);

      defm "" : VPseudoBinaryM_VF<m, f>,
                Sched<[WriteVFCmpF_MX, ReadVFCmpV_MX, ReadVFCmpF_MX, ReadVMask]>;
    }
  }
}
3551
// FP compare pseudos with only the scalar (._Vf) forms.
multiclass VPseudoVCMPM_VF {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      defvar WriteVFCmpF_MX = !cast<SchedWrite>("WriteVFCmpF_" # mx);
      defvar ReadVFCmpV_MX = !cast<SchedRead>("ReadVFCmpV_" # mx);
      defvar ReadVFCmpF_MX = !cast<SchedRead>("ReadVFCmpF_" # mx);

      defm "" : VPseudoBinaryM_VF<m, f>,
                Sched<[WriteVFCmpF_MX, ReadVFCmpV_MX, ReadVFCmpF_MX, ReadVMask]>;
    }
  }
}
3565
// Integer compare pseudos with only .vx/.vi forms (no .vv encoding).
multiclass VPseudoVCMPM_VX_VI {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVICmpX_MX = !cast<SchedWrite>("WriteVICmpX_" # mx);
    defvar WriteVICmpI_MX = !cast<SchedWrite>("WriteVICmpI_" # mx);
    defvar ReadVICmpV_MX = !cast<SchedRead>("ReadVICmpV_" # mx);
    defvar ReadVICmpX_MX = !cast<SchedRead>("ReadVICmpX_" # mx);

    defm "" : VPseudoBinaryM_VX<m>,
              Sched<[WriteVICmpX_MX, ReadVICmpV_MX, ReadVICmpX_MX, ReadVMask]>;
    defm "" : VPseudoBinaryM_VI<m>,
              Sched<[WriteVICmpI_MX, ReadVICmpV_MX, ReadVMask]>;
  }
}
3580
// Integer reduction pseudos (_VS): scalar result lives in a V_M1 group;
// one pseudo per (LMUL, SEW) pair since reduction timing depends on both.
multiclass VPseudoVRED_VS {
  foreach m = MxList in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx>.val in {
      defvar WriteVIRedV_From_MX_E = !cast<SchedWrite>("WriteVIRedV_From_" # mx # "_E" # e);
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 Sched<[WriteVIRedV_From_MX_E, ReadVIRedV, ReadVIRedV, ReadVIRedV,
                        ReadVMask]>;
    }
  }
}
3592
// Like VPseudoVRED_VS but using the dedicated min/max reduction sched
// writes (min/max reductions can be cheaper than sum-like ones).
multiclass VPseudoVREDMINMAX_VS {
  foreach m = MxList in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx>.val in {
      defvar WriteVIRedMinMaxV_From_MX_E = !cast<SchedWrite>("WriteVIRedMinMaxV_From_" # mx # "_E" # e);
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 Sched<[WriteVIRedMinMaxV_From_MX_E, ReadVIRedV, ReadVIRedV,
                        ReadVIRedV, ReadVMask]>;
    }
  }
}
3604
// Widening integer reduction pseudos over MxListWRed; SEW set is the
// widening variant of SchedSEWSet.
multiclass VPseudoVWRED_VS {
  foreach m = MxListWRed in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isWidening=1>.val in {
      defvar WriteVIWRedV_From_MX_E = !cast<SchedWrite>("WriteVIWRedV_From_" # mx # "_E" # e);
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 Sched<[WriteVIWRedV_From_MX_E, ReadVIWRedV, ReadVIWRedV,
                        ReadVIWRedV, ReadVMask]>;
    }
  }
}
3616
// Unordered FP reduction pseudos with a rounding-mode operand, per
// (LMUL, FP SEW) pair.
multiclass VPseudoVFRED_VS_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      defvar WriteVFRedV_From_MX_E = !cast<SchedWrite>("WriteVFRedV_From_" # mx # "_E" # e);
      defm _VS
          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
                                                     V_M1.vrclass, m, e>,
                 Sched<[WriteVFRedV_From_MX_E, ReadVFRedV, ReadVFRedV, ReadVFRedV,
                        ReadVMask]>;
    }
  }
}
3630
// FP min/max reduction pseudos; no rounding-mode operand, since min/max
// does not round.
multiclass VPseudoVFREDMINMAX_VS {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      defvar WriteVFRedMinMaxV_From_MX_E = !cast<SchedWrite>("WriteVFRedMinMaxV_From_" # mx # "_E" # e);
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 Sched<[WriteVFRedMinMaxV_From_MX_E, ReadVFRedV, ReadVFRedV, ReadVFRedV,
                        ReadVMask]>;
    }
  }
}
3642
// Ordered FP reduction pseudos with rounding mode; uses the distinct
// ordered-reduction (VFRedO) scheduling resources.
multiclass VPseudoVFREDO_VS_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      defvar WriteVFRedOV_From_MX_E = !cast<SchedWrite>("WriteVFRedOV_From_" # mx # "_E" # e);
      defm _VS : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
                                                          V_M1.vrclass, m, e>,
                 Sched<[WriteVFRedOV_From_MX_E, ReadVFRedOV, ReadVFRedOV,
                        ReadVFRedOV, ReadVMask]>;
    }
  }
}
3655
// Widening FP reduction pseudos with rounding mode over MxListFWRed.
multiclass VPseudoVFWRED_VS_RM {
  foreach m = MxListFWRed in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
      defvar WriteVFWRedV_From_MX_E = !cast<SchedWrite>("WriteVFWRedV_From_" # mx # "_E" # e);
      defm _VS
          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
                                                     V_M1.vrclass, m, e>,
                 Sched<[WriteVFWRedV_From_MX_E, ReadVFWRedV, ReadVFWRedV,
                        ReadVFWRedV, ReadVMask]>;
    }
  }
}
3669
// Unary conversion pseudo pair (unmasked + _MASK) at a single LMUL; the
// masked variant's mask operand index is 2 (RISCVMaskedPseudo<MaskIdx=2>).
multiclass VPseudoConversion<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
                                                    Constraint>,
                                   RISCVMaskedPseudo<MaskIdx=2>;
  }
}
3681
// As VPseudoConversion, but the pseudos carry a dynamic rounding-mode
// operand.
multiclass VPseudoConversionRoundingMode<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoUnaryNoMaskRoundingMode<RetClass, Op1Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskRoundingMode<RetClass, Op1Class,
                                                                Constraint>,
                                   RISCVMaskedPseudo<MaskIdx=2>;
  }
}
3693
3694
// As VPseudoConversion, but built from the _FRM unary pseudos (frm-based
// rounding, distinct from the RoundingMode variant above).
multiclass VPseudoConversionRM<VReg RetClass,
                               VReg Op1Class,
                               LMULInfo MInfo,
                               string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoUnaryNoMask_FRM<RetClass, Op1Class,
                                                        Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask_FRM<RetClass, Op1Class,
                                                        Constraint>,
                                   RISCVMaskedPseudo<MaskIdx=2>;
  }
}
3707
// Conversion pseudo with only a masked, no-exception variant -- no
// unmasked def is produced here, unlike the other conversion multiclasses.
multiclass VPseudoConversionNoExcept<VReg RetClass,
                                     VReg Op1Class,
                                     LMULInfo MInfo,
                                     string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask_NoExcept<RetClass, Op1Class, Constraint>;
  }
}
3716
// Same-width FP-to-integer conversion pseudos (_V), one per FP LMUL.
multiclass VPseudoVCVTI_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCvtFToIV_MX = !cast<SchedWrite>("WriteVFCvtFToIV_" # mx);
    defvar ReadVFCvtFToIV_MX = !cast<SchedRead>("ReadVFCvtFToIV_" # mx);

    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtFToIV_MX, ReadVFCvtFToIV_MX, ReadVMask]>;
  }
}
3727
// FP-to-integer conversion pseudos carrying a rounding-mode operand.
multiclass VPseudoVCVTI_V_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCvtFToIV_MX = !cast<SchedWrite>("WriteVFCvtFToIV_" # mx);
    defvar ReadVFCvtFToIV_MX = !cast<SchedRead>("ReadVFCvtFToIV_" # mx);

    defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtFToIV_MX, ReadVFCvtFToIV_MX, ReadVMask]>;
  }
}
3738
// FP-to-integer conversion pseudos using the _FRM unary forms.
multiclass VPseudoVCVTI_RM_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCvtFToIV_MX = !cast<SchedWrite>("WriteVFCvtFToIV_" # mx);
    defvar ReadVFCvtFToIV_MX = !cast<SchedRead>("ReadVFCvtFToIV_" # mx);

    defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtFToIV_MX, ReadVFCvtFToIV_MX, ReadVMask]>;
  }
}
3749
// Masked-only, no-exception FP rounding pseudos (built on the NoExcept
// conversion multiclass, which emits only a _MASK def).
multiclass VPseudoVFROUND_NOEXCEPT_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCvtFToIV_MX = !cast<SchedWrite>("WriteVFCvtFToIV_" # mx);
    defvar ReadVFCvtFToIV_MX = !cast<SchedRead>("ReadVFCvtFToIV_" # mx);

    defm _V : VPseudoConversionNoExcept<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtFToIV_MX, ReadVFCvtFToIV_MX, ReadVMask]>;
  }
}
3760
// Integer-to-FP conversion pseudos with a rounding-mode operand.
multiclass VPseudoVCVTF_V_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCvtIToFV_MX = !cast<SchedWrite>("WriteVFCvtIToFV_" # mx);
    defvar ReadVFCvtIToFV_MX = !cast<SchedRead>("ReadVFCvtIToFV_" # mx);

    defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtIToFV_MX, ReadVFCvtIToFV_MX, ReadVMask]>;
  }
}
3771
// Integer-to-FP conversion pseudos using the _FRM unary forms.
multiclass VPseudoVCVTF_RM_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar WriteVFCvtIToFV_MX = !cast<SchedWrite>("WriteVFCvtIToFV_" # mx);
    defvar ReadVFCvtIToFV_MX = !cast<SchedRead>("ReadVFCvtIToFV_" # mx);

    defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
              Sched<[WriteVFCvtIToFV_MX, ReadVFCvtIToFV_MX, ReadVMask]>;
  }
}
3782
// Widening FP-to-integer conversion pseudos: wide destination
// (m.wvrclass), hence the early-clobber constraint on $rd.
multiclass VPseudoVWCVTI_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWCvtFToIV_MX = !cast<SchedWrite>("WriteVFWCvtFToIV_" # mx);
    defvar ReadVFWCvtFToIV_MX = !cast<SchedRead>("ReadVFWCvtFToIV_" # mx);

    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtFToIV_MX, ReadVFWCvtFToIV_MX, ReadVMask]>;
  }
}
3794
// As VPseudoVWCVTI_V, but the pseudos carry a rounding-mode operand
// (VPseudoConversionRoundingMode).
multiclass VPseudoVWCVTI_V_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWCvtFToIV_MX = !cast<SchedWrite>("WriteVFWCvtFToIV_" # mx);
    defvar ReadVFWCvtFToIV_MX = !cast<SchedRead>("ReadVFWCvtFToIV_" # mx);

    defm _V : VPseudoConversionRoundingMode<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtFToIV_MX, ReadVFWCvtFToIV_MX, ReadVMask]>;
  }
}
3806
// As VPseudoVWCVTI_V, but with a static rounding mode (VPseudoConversionRM).
multiclass VPseudoVWCVTI_RM_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWCvtFToIV_MX = !cast<SchedWrite>("WriteVFWCvtFToIV_" # mx);
    defvar ReadVFWCvtFToIV_MX = !cast<SchedRead>("ReadVFWCvtFToIV_" # mx);

    defm _V : VPseudoConversionRM<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtFToIV_MX, ReadVFWCvtFToIV_MX, ReadVMask]>;
  }
}
3818
// Widening integer-to-FP conversion pseudos (wide destination, earlyclobber).
// Iterates MxListW rather than MxListFW since the narrow source is an
// integer type.
multiclass VPseudoVWCVTF_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVFWCvtIToFV_MX = !cast<SchedWrite>("WriteVFWCvtIToFV_" # mx);
    defvar ReadVFWCvtIToFV_MX = !cast<SchedRead>("ReadVFWCvtIToFV_" # mx);

    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtIToFV_MX, ReadVFWCvtIToFV_MX, ReadVMask]>;
  }
}
3830
// Widening FP-to-FP conversion pseudos (wide destination, earlyclobber),
// using the FToF widening scheduling classes.
multiclass VPseudoVWCVTD_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFWCvtFToFV_MX = !cast<SchedWrite>("WriteVFWCvtFToFV_" # mx);
    defvar ReadVFWCvtFToFV_MX = !cast<SchedRead>("ReadVFWCvtFToFV_" # mx);

    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
              Sched<[WriteVFWCvtFToFV_MX, ReadVFWCvtFToFV_MX, ReadVMask]>;
  }
}
3842
// Narrowing FP-to-integer conversion pseudos: destination uses the
// single-width register class, source the wide one (m.wvrclass).  The
// destination is earlyclobber since it may not partially overlap the wider
// source register group.  Suffix "_W" reflects the wide source operand.
multiclass VPseudoVNCVTI_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVFNCvtFToIV_MX = !cast<SchedWrite>("WriteVFNCvtFToIV_" # mx);
    defvar ReadVFNCvtFToIV_MX = !cast<SchedRead>("ReadVFNCvtFToIV_" # mx);

    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtFToIV_MX, ReadVFNCvtFToIV_MX, ReadVMask]>;
  }
}
3854
// As VPseudoVNCVTI_W, but the pseudos carry a rounding-mode operand
// (VPseudoConversionRoundingMode).
multiclass VPseudoVNCVTI_W_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVFNCvtFToIV_MX = !cast<SchedWrite>("WriteVFNCvtFToIV_" # mx);
    defvar ReadVFNCvtFToIV_MX = !cast<SchedRead>("ReadVFNCvtFToIV_" # mx);

    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtFToIV_MX, ReadVFNCvtFToIV_MX, ReadVMask]>;
  }
}
3866
// As VPseudoVNCVTI_W, but with a static rounding mode (VPseudoConversionRM).
multiclass VPseudoVNCVTI_RM_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    defvar mx = m.MX;
    defvar WriteVFNCvtFToIV_MX = !cast<SchedWrite>("WriteVFNCvtFToIV_" # mx);
    defvar ReadVFNCvtFToIV_MX = !cast<SchedRead>("ReadVFNCvtFToIV_" # mx);

    defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtFToIV_MX, ReadVFNCvtFToIV_MX, ReadVMask]>;
  }
}
3878
// Narrowing integer-to-FP conversion pseudos with a rounding-mode operand
// (VPseudoConversionRoundingMode); wide source, earlyclobber destination.
multiclass VPseudoVNCVTF_W_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFNCvtIToFV_MX = !cast<SchedWrite>("WriteVFNCvtIToFV_" # mx);
    defvar ReadVFNCvtIToFV_MX = !cast<SchedRead>("ReadVFNCvtIToFV_" # mx);

    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtIToFV_MX, ReadVFNCvtIToFV_MX, ReadVMask]>;
  }
}
3890
// As VPseudoVNCVTF_W_RM, but with a static rounding mode
// (VPseudoConversionRM).
multiclass VPseudoVNCVTF_RM_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFNCvtIToFV_MX = !cast<SchedWrite>("WriteVFNCvtIToFV_" # mx);
    defvar ReadVFNCvtIToFV_MX = !cast<SchedRead>("ReadVFNCvtIToFV_" # mx);

    defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtIToFV_MX, ReadVFNCvtIToFV_MX, ReadVMask]>;
  }
}
3902
// Narrowing FP-to-FP conversion pseudos (wide source, earlyclobber
// destination), using the FToF narrowing scheduling classes.
multiclass VPseudoVNCVTD_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFNCvtFToFV_MX = !cast<SchedWrite>("WriteVFNCvtFToFV_" # mx);
    defvar ReadVFNCvtFToFV_MX = !cast<SchedRead>("ReadVFNCvtFToFV_" # mx);

    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtFToFV_MX, ReadVFNCvtFToFV_MX, ReadVMask]>;
  }
}
3914
// As VPseudoVNCVTD_W, but the pseudos carry a rounding-mode operand
// (VPseudoConversionRoundingMode).
multiclass VPseudoVNCVTD_W_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defvar mx = m.MX;
    defvar WriteVFNCvtFToFV_MX = !cast<SchedWrite>("WriteVFNCvtFToFV_" # mx);
    defvar ReadVFNCvtFToFV_MX = !cast<SchedRead>("ReadVFNCvtFToFV_" # mx);

    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint>,
              Sched<[WriteVFNCvtFToFV_MX, ReadVFNCvtFToFV_MX, ReadVMask]>;
  }
}
3926
// Unit-stride segment load pseudos.  For each EEW, each legal LMUL for that
// EEW, and each legal NF (segment field count), defines an unmasked and a
// masked pseudo named e.g. "<nf>E<eew>_V_<LMUL>[_MASK]", over the grouped
// segment register class.
multiclass VPseudoUSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo :
            VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3943
// Unit-stride fault-only-first segment load pseudos; same structure as
// VPseudoUSSegLoad but with an "FF" infix in the record names and the
// fault-only-first pseudo/scheduling classes.
multiclass VPseudoUSSegLoadFF {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "FF_V_" # LInfo :
            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadFFMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3960
// Strided segment load pseudos; same naming scheme as VPseudoUSSegLoad but
// using the strided pseudo/scheduling classes.
multiclass VPseudoSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>,
                                               VLSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>,
                                                         VLSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3977
// Indexed (ordered or unordered, per the Ordered parameter) segment load
// pseudos.  Iterates all (index EEW, data EEW, data EMUL) combinations and
// computes the index EMUL; combinations whose index EMUL falls outside
// [MF8, M8] are skipped.  Names follow "<nf>EI<idxEEW>_V_<idxLMUL>_<dataLMUL>".
multiclass VPseudoISegLoad<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        // Octuple 1..64 corresponds to EMUL MF8..M8; anything else is illegal.
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar DataVreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = dataEMUL.value in {
            foreach nf = NFSet<dataEMUL>.L in {
              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
                VPseudoISegLoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                      nf, Ordered>,
                VLXSEGSched<nf, dataEEW, Order, DataLInfo>;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
                VPseudoISegLoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                    nf, Ordered>,
                VLXSEGSched<nf, dataEEW, Order, DataLInfo>;
            }
          }
        }
      }
    }
  }
}
4010
// Unit-stride segment store pseudos; mirrors VPseudoUSSegLoad with the store
// pseudo/scheduling classes.
multiclass VPseudoUSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>,
                                               VSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>,
                                                         VSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
4027
// Strided segment store pseudos; mirrors VPseudoSSegLoad with the store
// pseudo/scheduling classes.
multiclass VPseudoSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>,
                                               VSSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>,
                                                         VSSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
4044
// Indexed segment store pseudos; mirrors VPseudoISegLoad with the store
// pseudo/scheduling classes.  Note the Sched record here is keyed on idxEEW
// while the load variant uses dataEEW.
multiclass VPseudoISegStore<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        // Octuple 1..64 corresponds to EMUL MF8..M8; anything else is illegal.
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar DataVreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          defvar Order = !if(Ordered, "O", "U");
          let VLMul = dataEMUL.value in {
            foreach nf = NFSet<dataEMUL>.L in {
              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
                VPseudoISegStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                       nf, Ordered>,
                VSXSEGSched<nf, idxEEW, Order, DataLInfo>;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
                VPseudoISegStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                     nf, Ordered>,
                VSXSEGSched<nf, idxEEW, Order, DataLInfo>;
            }
          }
        }
      }
    }
  }
}
4077
4078//===----------------------------------------------------------------------===//
4079// Helpers to define the intrinsic patterns.
4080//===----------------------------------------------------------------------===//
4081
// Pattern: unmasked unary intrinsic with a merge operand -> the corresponding
// unmasked pseudo with tail-undisturbed/mask-undisturbed (TU_MU) policy.
// When isSEWAware is set, the SEW value is appended to the pseudo name
// (e.g. "..._M1_E32") to select a SEW-specific pseudo.
class VPatUnaryNoMask<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      int log2sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg op2_reg_class,
                      bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                          inst#"_"#kind#"_"#vlmul.MX))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, log2sew, TU_MU)>;
4103
// As VPatUnaryNoMask, but the intrinsic and pseudo additionally carry a
// rounding-mode immediate operand (timm:$round), passed through unchanged.
class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op2_type,
                                  int log2sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  VReg op2_reg_class,
                                  bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                          inst#"_"#kind#"_"#vlmul.MX))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, log2sew, TU_MU)>;
4127
4128
// Pattern: masked unary intrinsic (mask in V0, explicit policy operand) ->
// the "_MASK" variant of the pseudo, forwarding the policy immediate.
class VPatUnaryMask<string intrinsic_name,
                    string inst,
                    string kind,
                    ValueType result_type,
                    ValueType op2_type,
                    ValueType mask_type,
                    int log2sew,
                    LMULInfo vlmul,
                    VReg result_reg_class,
                    VReg op2_reg_class,
                    bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4152
// As VPatUnaryMask, but with an additional rounding-mode immediate operand
// (timm:$round) on both the intrinsic and the pseudo.
class VPatUnaryMaskRoundingMode<string intrinsic_name,
                                string inst,
                                string kind,
                                ValueType result_type,
                                ValueType op2_type,
                                ValueType mask_type,
                                int log2sew,
                                LMULInfo vlmul,
                                VReg result_reg_class,
                                VReg op2_reg_class,
                                bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4179
4180
// Pattern: unmasked mask-register unary intrinsic -> pseudo named
// "<inst>_M_<BX>" with an IMPLICIT_DEF merge operand and TU_MU policy.
class VPatMaskUnaryNoMask<string intrinsic_name,
                          string inst,
                          MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                (mti.Mask VR:$rs2),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX)
                (mti.Mask (IMPLICIT_DEF)),
                (mti.Mask VR:$rs2),
                GPR:$vl, mti.Log2SEW, TU_MU)>;
4191
// Pattern: masked mask-register unary intrinsic -> "_MASK" variant of the
// pseudo, with the caller-provided merge operand and TU_MU policy.
class VPatMaskUnaryMask<string intrinsic_name,
                        string inst,
                        MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
4204
// Pattern: unary intrinsic taking an arbitrary mask-register operand ($rs2
// in VR, not V0) -> SEW-aware pseudo ("..._E<sew>").  No policy operand.
class VPatUnaryAnyMask<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType mask_type,
                       int log2sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   GPR:$vl, log2sew)>;
4225
// Pattern: binary intrinsic with no merge operand and no policy (used for
// mask-producing operations) -> the pseudo named exactly by `inst`.
class VPatBinaryM<string intrinsic_name,
                  string inst,
                  ValueType result_type,
                  ValueType op1_type,
                  ValueType op2_type,
                  int sew,
                  VReg op1_reg_class,
                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
4242
// Pattern: unmasked binary intrinsic with a merge operand -> pseudo with
// tail-undisturbed/mask-undisturbed (TU_MU) policy.
class VPatBinaryNoMaskTU<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         int sew,
                         VReg result_reg_class,
                         VReg op1_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TU_MU)>;
4262
// Pattern: unmasked binary intrinsic with an undef merge operand and a
// rounding-mode immediate -> pseudo with IMPLICIT_DEF merge and
// tail-agnostic/mask-agnostic (TA_MA) policy.
class VPatBinaryNoMaskRoundingMode<string intrinsic_name,
                                   string inst,
                                   ValueType result_type,
                                   ValueType op1_type,
                                   ValueType op2_type,
                                   int sew,
                                   VReg op1_reg_class,
                                   DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (result_type (IMPLICIT_DEF)),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, TA_MA)>;
4283
// As VPatBinaryNoMaskTU (real merge operand, TU_MU policy), plus a
// rounding-mode immediate operand passed through to the pseudo.
class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
                                     string inst,
                                     ValueType result_type,
                                     ValueType op1_type,
                                     ValueType op2_type,
                                     int sew,
                                     VReg result_reg_class,
                                     VReg op1_reg_class,
                                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, TU_MU)>;
4305
4306
4307// Same as above but source operands are swapped.
// Same as VPatBinaryM but the intrinsic's source operands are matched in
// swapped order: intrinsic (rs2, rs1) -> pseudo (rs1, rs2).
class VPatBinaryNoMaskSwapped<string intrinsic_name,
                              string inst,
                              ValueType result_type,
                              ValueType op1_type,
                              ValueType op2_type,
                              int sew,
                              VReg op1_reg_class,
                              DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
4324
// Pattern: masked binary intrinsic (mask in V0) -> "_MASK" variant of the
// pseudo.  No policy operand (cf. VPatBinaryMaskTA, which forwards one).
class VPatBinaryMask<string intrinsic_name,
                     string inst,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int sew,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
4346
// Pattern: masked binary intrinsic with an explicit policy operand ->
// "_MASK" variant of the pseudo, forwarding the policy immediate.
class VPatBinaryMaskTA<string intrinsic_name,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       VReg result_reg_class,
                       VReg op1_reg_class,
                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4368
// As VPatBinaryMaskTA, plus a rounding-mode immediate operand passed through
// to the "_MASK" pseudo.
class VPatBinaryMaskTARoundingMode<string intrinsic_name,
                                   string inst,
                                   ValueType result_type,
                                   ValueType op1_type,
                                   ValueType op2_type,
                                   ValueType mask_type,
                                   int sew,
                                   VReg result_reg_class,
                                   VReg op1_reg_class,
                                   DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4393
4394// Same as above but source operands are swapped.
// Same as VPatBinaryMask but the intrinsic's source operands are matched in
// swapped order: intrinsic (rs2, rs1) -> pseudo (rs1, rs2).
class VPatBinaryMaskSwapped<string intrinsic_name,
                            string inst,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            VReg result_reg_class,
                            VReg op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
4416
// Pattern: unmasked binary intrinsic whose merge operand is undef and whose
// first source matches the result type -> "_TIED" pseudo, which ties the
// result to $rs1.  Uses TAIL_AGNOSTIC since the merge value is undef.
class VPatTiedBinaryNoMask<string intrinsic_name,
                           string inst,
                           ValueType result_type,
                           ValueType op2_type,
                           int sew,
                           VReg result_reg_class,
                           DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4433
// As VPatTiedBinaryNoMask, plus a rounding-mode immediate operand passed
// through to the "_TIED" pseudo.
class VPatTiedBinaryNoMaskRoundingMode<string intrinsic_name,
                                       string inst,
                                       ValueType result_type,
                                       ValueType op2_type,
                                       int sew,
                                       VReg result_reg_class,
                                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4452
// Pattern: unmasked binary intrinsic whose merge operand equals its first
// source ($merge appears in both positions) -> "_TIED" pseudo with TU_MU
// policy, tying the result to the merge register.
class VPatTiedBinaryNoMaskTU<string intrinsic_name,
                             string inst,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             VReg result_reg_class,
                             DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TU_MU)>;
4469
// As VPatTiedBinaryNoMaskTU, plus a rounding-mode immediate operand passed
// through to the "_TIED" pseudo.
class VPatTiedBinaryNoMaskTURoundingMode<string intrinsic_name,
                                         string inst,
                                         ValueType result_type,
                                         ValueType op2_type,
                                         int sew,
                                         VReg result_reg_class,
                                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, TU_MU)>;
4488
// Masked form of the tied binary pattern: matches the "_mask" intrinsic with
// the mask in V0 and selects the "_MASK_TIED" pseudo, forwarding the
// intrinsic's tail/mask policy immediate.
class VPatTiedBinaryMask<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int sew,
                         VReg result_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4507
// Like VPatTiedBinaryMask ("_mask" intrinsic, V0 mask, "_MASK_TIED" pseudo,
// forwarded policy) but also threads the rounding-mode immediate
// (timm:$round) through to the pseudo.
class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
                                     string inst,
                                     ValueType result_type,
                                     ValueType op2_type,
                                     ValueType mask_type,
                                     int sew,
                                     VReg result_reg_class,
                                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4529
// Unmasked ternary pattern: $rs3 is both a source and the (tied) destination;
// the selected pseudo carries no explicit policy operand.
class VPatTernaryNoMask<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew)>;
4551
// Unmasked ternary pattern selecting a SEW-aware pseudo: the instruction name
// gains an "_E"<sew> suffix (sew = 1 << log2sew) and the TAIL_AGNOSTIC policy
// is pinned.
class VPatTernaryNoMaskTA<string intrinsic,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op1_type,
                          ValueType op2_type,
                          int log2sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          RegisterClass op1_reg_class,
                          DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4573
// Like VPatTernaryNoMaskTA (SEW-suffixed pseudo name, TAIL_AGNOSTIC) but also
// threads the rounding-mode immediate (timm:$round) through to the pseudo.
class VPatTernaryNoMaskTARoundingMode<string intrinsic,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op1_type,
                          ValueType op2_type,
                          int log2sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          RegisterClass op1_reg_class,
                          DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (XLenVT timm:$round),
                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4597
// Unmasked ternary pattern that forwards the intrinsic's tail/mask policy
// immediate (timm:$policy) to the pseudo instead of pinning one.
class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  int sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  RegisterClass op1_reg_class,
                                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4619
// Like VPatTernaryNoMaskWithPolicy (policy immediate forwarded) but also
// threads the rounding-mode immediate (timm:$round) through to the pseudo.
class VPatTernaryNoMaskWithPolicyRoundingMode<string intrinsic,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  int sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  RegisterClass op1_reg_class,
                                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (XLenVT timm:$round),
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4643
// Masked ternary pattern: matches the "_mask" intrinsic with the mask in V0
// and selects the "_MASK" pseudo; no explicit policy operand is passed.
class VPatTernaryMask<string intrinsic,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      RegisterClass op1_reg_class,
                      DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew)>;
4668
// Masked ternary pattern (mask in V0, "_MASK" pseudo) that forwards the
// intrinsic's tail/mask policy immediate (timm:$policy) to the pseudo.
class VPatTernaryMaskPolicy<string intrinsic,
                            string inst,
                            string kind,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
                            VReg result_reg_class,
                            RegisterClass op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4693
// Like VPatTernaryMaskPolicy (V0 mask, forwarded policy) but also threads the
// rounding-mode immediate (timm:$round) through to the pseudo.
class VPatTernaryMaskPolicyRoundingMode<string intrinsic,
                                        string inst,
                                        string kind,
                                        ValueType result_type,
                                        ValueType op1_type,
                                        ValueType op2_type,
                                        ValueType mask_type,
                                        int sew,
                                        LMULInfo vlmul,
                                        VReg result_reg_class,
                                        RegisterClass op1_reg_class,
                                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    (XLenVT timm:$round),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    (XLenVT timm:$round),
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4720
// Masked ternary pattern selecting a SEW-aware "_MASK" pseudo (name suffixed
// "_E"<sew>, sew = 1 << log2sew) with the TAIL_AGNOSTIC policy pinned.
class VPatTernaryMaskTA<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int log2sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4745
// Like VPatTernaryMaskTA (SEW-suffixed "_MASK" pseudo, TAIL_AGNOSTIC) but
// also threads the rounding-mode immediate (timm:$round) through.
class VPatTernaryMaskTARoundingMode<string intrinsic,
                                    string inst,
                                    string kind,
                                    ValueType result_type,
                                    ValueType op1_type,
                                    ValueType op2_type,
                                    ValueType mask_type,
                                    int log2sew,
                                    LMULInfo vlmul,
                                    VReg result_reg_class,
                                    RegisterClass op1_reg_class,
                                    DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    (XLenVT timm:$round),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    (XLenVT timm:$round),
                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
4772
// Patterns for unary mask-to-scalar intrinsics (mask vector in, XLenVT out):
// one unmasked and one masked (mask in V0) pattern per mask type.
multiclass VPatUnaryS_M<string intrinsic_name,
                             string inst> {
  foreach mti = AllMasks in {
    // Unmasked form.
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
                      (mti.Mask VR:$rs1), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
                      GPR:$vl, mti.Log2SEW)>;
    // Masked form: matches the "_mask" intrinsic and the "_MASK" pseudo.
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
                      (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}
4786
// Instantiate the "VM" (vector source plus any-mask operand) unary pattern
// for every vector type in vtilist, guarded by that type's predicates.
multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vt = vtilist in {
    let Predicates = GetVTypePredicates<vt>.Predicates in
    def : VPatUnaryAnyMask<intrinsic, instruction, "VM", vt.Vector, vt.Vector,
                           vt.Mask, vt.Log2SEW, vt.LMul, vt.RegClass,
                           vt.RegClass>;
  }
}
4796
// Unary mask-register ops: emit one unmasked and one masked pattern for each
// mask type.
multiclass VPatUnaryM_M<string intrinsic,
                         string inst> {
  foreach mt = AllMasks in {
    def : VPatMaskUnaryNoMask<intrinsic, inst, mt>;
    def : VPatMaskUnaryMask<intrinsic, inst, mt>;
  }
}
4804
// Unary ops with a mask-typed source ("M" kind) producing an integer vector:
// unmasked and masked patterns for every integer vector type.
multiclass VPatUnaryV_M<string intrinsic, string instruction> {
  foreach vt = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vt>.Predicates in {
      def : VPatUnaryNoMask<intrinsic, instruction, "M", vt.Vector, vt.Mask,
                            vt.Log2SEW, vt.LMul, vt.RegClass, VR>;
      def : VPatUnaryMask<intrinsic, instruction, "M", vt.Vector, vt.Mask,
                          vt.Mask, vt.Log2SEW, vt.LMul, vt.RegClass, VR>;
    }
  }
}
4815
// Unary ops whose source uses the fractional type (Fti) of each pair while
// the result uses the full type (Vti); both types' predicates must hold.
multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
                         list<VTypeInfoToFraction> fractionList> {
  foreach pair = fractionList in {
      defvar vt = pair.Vti;
      defvar ft = pair.Fti;
      let Predicates = !listconcat(GetVTypePredicates<vt>.Predicates,
                                   GetVTypePredicates<ft>.Predicates) in {
        def : VPatUnaryNoMask<intrinsic, instruction, suffix, vt.Vector,
                              ft.Vector, vt.Log2SEW, vt.LMul, vt.RegClass,
                              ft.RegClass>;
        def : VPatUnaryMask<intrinsic, instruction, suffix, vt.Vector,
                            ft.Vector, vt.Mask, vt.Log2SEW, vt.LMul,
                            vt.RegClass, ft.RegClass>;
      }
  }
}
4832
// Same-type unary ops ("V" kind): unmasked and masked patterns per vector
// type; isSEWAware is forwarded so SEW-suffixed pseudo names are used when
// requested.
multiclass VPatUnaryV_V<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vt = vtilist in {
    let Predicates = GetVTypePredicates<vt>.Predicates in {
      def : VPatUnaryNoMask<intrinsic, instruction, "V", vt.Vector, vt.Vector,
                            vt.Log2SEW, vt.LMul, vt.RegClass, vt.RegClass,
                            isSEWAware>;
      def : VPatUnaryMask<intrinsic, instruction, "V", vt.Vector, vt.Vector,
                          vt.Mask, vt.Log2SEW, vt.LMul, vt.RegClass,
                          vt.RegClass, isSEWAware>;
    }
  }
}
4846
// Rounding-mode variant of VPatUnaryV_V: instantiates the RoundingMode
// unary pattern classes (which carry a timm:$round operand) per vector type.
multiclass VPatUnaryV_V_RM<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vt = vtilist in {
    let Predicates = GetVTypePredicates<vt>.Predicates in {
      def : VPatUnaryNoMaskRoundingMode<intrinsic, instruction, "V",
                                        vt.Vector, vt.Vector, vt.Log2SEW,
                                        vt.LMul, vt.RegClass, vt.RegClass,
                                        isSEWAware>;
      def : VPatUnaryMaskRoundingMode<intrinsic, instruction, "V", vt.Vector,
                                      vt.Vector, vt.Mask, vt.Log2SEW, vt.LMul,
                                      vt.RegClass, vt.RegClass, isSEWAware>;
    }
  }
}
4860
// Patterns for vector intrinsics with no vector source operand other than the
// merge/passthru: an unmasked form pinned to TU_MU and a masked form (V0)
// that forwards the policy immediate, for every integer vector type.
multiclass VPatNullaryV<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                            (vti.Vector vti.RegClass:$merge),
                            VLOpFrag)),
                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                            vti.RegClass:$merge, GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                            (vti.Vector vti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                            vti.RegClass:$merge, (vti.Mask V0),
                            GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  }
  }
}
4878
// Patterns for mask-producing intrinsics that take no source operands at all
// (only the VL operand), emitted for every mask type.
multiclass VPatNullaryM<string intrinsic, string inst> {
  foreach mti = AllMasks in
    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
                        VLOpFrag)),
                        (!cast<Instruction>(inst#"_M_"#mti.BX)
                        GPR:$vl, mti.Log2SEW)>;
}
4886
// Pair of patterns for mask-producing binary intrinsics: the unmasked form
// (class VPatBinaryM, no passthru operand) and the masked form
// (VPatBinaryMask).
multiclass VPatBinaryM<string intrinsic, string inst, ValueType result_type,
                       ValueType op1_type, ValueType op2_type,
                       ValueType mask_type, int sew, VReg result_reg_class,
                       VReg op1_reg_class, DAGOperand op2_kind> {
  def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type, sew,
                    op1_reg_class, op2_kind>;
  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
                       mask_type, sew, result_reg_class, op1_reg_class,
                       op2_kind>;
}
4903
// Standard binary-op pattern pair: the tail-undisturbed unmasked form
// (VPatBinaryNoMaskTU) and the tail-agnostic masked form (VPatBinaryMaskTA).
multiclass VPatBinary<string intrinsic, string inst, ValueType result_type,
                      ValueType op1_type, ValueType op2_type,
                      ValueType mask_type, int sew, VReg result_reg_class,
                      VReg op1_reg_class, DAGOperand op2_kind> {
  def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
                           sew, result_reg_class, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                         mask_type, sew, result_reg_class, op1_reg_class,
                         op2_kind>;
}
4920
// Rounding-mode binary-op pattern set: plain unmasked, tail-undisturbed
// unmasked, and tail-agnostic masked forms, each carrying the timm:$round
// operand.
multiclass VPatBinaryRoundingMode<string intrinsic, string inst,
                                  ValueType result_type, ValueType op1_type,
                                  ValueType op2_type, ValueType mask_type,
                                  int sew, VReg result_reg_class,
                                  VReg op1_reg_class, DAGOperand op2_kind> {
  def : VPatBinaryNoMaskRoundingMode<intrinsic, inst, result_type, op1_type,
                                     op2_type, sew, op1_reg_class, op2_kind>;
  def : VPatBinaryNoMaskTURoundingMode<intrinsic, inst, result_type, op1_type,
                                       op2_type, sew, result_reg_class,
                                       op1_reg_class, op2_kind>;
  def : VPatBinaryMaskTARoundingMode<intrinsic, inst, result_type, op1_type,
                                     op2_type, mask_type, sew,
                                     result_reg_class, op1_reg_class,
                                     op2_kind>;
}
4939
// Pattern pair built from the swapped-operand pattern classes
// (VPatBinaryNoMaskSwapped / VPatBinaryMaskSwapped): unmasked and masked
// forms.
multiclass VPatBinarySwapped<string intrinsic, string inst,
                             ValueType result_type, ValueType op1_type,
                             ValueType op2_type, ValueType mask_type, int sew,
                             VReg result_reg_class, VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type,
                                op2_type, sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type,
                              op2_type, mask_type, sew, result_reg_class,
                              op1_reg_class, op2_kind>;
}
4956
// Binary op with a carry-in mask operand (in V0) and an explicit
// merge/passthru operand ($merge), which is passed through to the pseudo so
// tail elements can be taken from it.
multiclass VPatBinaryCarryInTAIL<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 VReg op1_reg_class,
                                 DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
4981
// Binary op with a carry-in mask operand (in V0) but no merge/passthru
// operand; the intrinsic and pseudo take only the two sources, the carry
// mask, and VL.
multiclass VPatBinaryCarryIn<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
5003
// Binary op taking only two sources and VL — no merge/passthru and no mask
// input. Per the multiclass name the result is presumably a mask register
// (carry/borrow-out style ops) — confirm against the instantiation sites.
multiclass VPatBinaryMaskOut<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         GPR:$vl, sew)>;
}
5023
// Conversion-op pattern pair: unmasked (VPatUnaryNoMask) and masked
// (VPatUnaryMask) unary patterns sharing one result/source type combination.
multiclass VPatConversionTA<string intrinsic, string inst, string kind,
                            ValueType result_type, ValueType op1_type,
                            ValueType mask_type, int sew, LMULInfo vlmul,
                            VReg result_reg_class, VReg op1_reg_class> {
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type, sew,
                        vlmul, result_reg_class, op1_reg_class>;
  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type, mask_type,
                      sew, vlmul, result_reg_class, op1_reg_class>;
}
5039
// Rounding-mode variant of VPatConversionTA: uses the RoundingMode unary
// pattern classes, which thread a timm:$round operand to the pseudo.
multiclass VPatConversionTARoundingMode<string intrinsic, string inst,
                                        string kind, ValueType result_type,
                                        ValueType op1_type,
                                        ValueType mask_type, int sew,
                                        LMULInfo vlmul,
                                        VReg result_reg_class,
                                        VReg op1_reg_class> {
  def : VPatUnaryNoMaskRoundingMode<intrinsic, inst, kind, result_type,
                                    op1_type, sew, vlmul, result_reg_class,
                                    op1_reg_class>;
  def : VPatUnaryMaskRoundingMode<intrinsic, inst, kind, result_type,
                                  op1_type, mask_type, sew, vlmul,
                                  result_reg_class, op1_reg_class>;
}
5055
// Vector-vector binary patterns for each type in vtilist; when isSEWAware is
// set the pseudo name gains an "_E"<SEW> suffix.
multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vt = vtilist in {
    defvar inst_name = !if(isSEWAware,
                           instruction # "_VV_" # vt.LMul.MX # "_E" # vt.SEW,
                           instruction # "_VV_" # vt.LMul.MX);
    let Predicates = GetVTypePredicates<vt>.Predicates in
    defm : VPatBinary<intrinsic, inst_name, vt.Vector, vt.Vector, vt.Vector,
                      vt.Mask, vt.Log2SEW, vt.RegClass, vt.RegClass,
                      vt.RegClass>;
  }
}
5068
// Rounding-mode variant of VPatBinaryV_VV: same pseudo naming, but the
// patterns come from VPatBinaryRoundingMode (which adds timm:$round).
multiclass VPatBinaryV_VV_RM<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vt = vtilist in {
    defvar inst_name = !if(isSEWAware,
                           instruction # "_VV_" # vt.LMul.MX # "_E" # vt.SEW,
                           instruction # "_VV_" # vt.LMul.MX);
    let Predicates = GetVTypePredicates<vt>.Predicates in
    defm : VPatBinaryRoundingMode<intrinsic, inst_name, vt.Vector, vt.Vector,
                                  vt.Vector, vt.Mask, vt.Log2SEW, vt.RegClass,
                                  vt.RegClass, vt.RegClass>;
  }
}
5081
// Vector-vector binary patterns whose second source is typed as the matching
// integer vector (via GetIntVTypeInfo); pseudo names are always SEW-suffixed.
multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  foreach vt = vtilist in {
    defvar ivt = GetIntVTypeInfo<vt>.Vti;
    let Predicates = GetVTypePredicates<vt>.Predicates in
    defm : VPatBinary<intrinsic,
                      instruction # "_VV_" # vt.LMul.MX # "_E" # vt.SEW,
                      vt.Vector, vt.Vector, ivt.Vector, vt.Mask, vt.Log2SEW,
                      vt.RegClass, vt.RegClass, vt.RegClass>;
  }
}
5094
// Vector-vector binary patterns where the second (index) source has a fixed
// element width `eew`, so it needs its own register-group multiplier (EMUL).
multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
                                  int eew, list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // emul = lmul * eew / sew
    // Computed in eighths ("octuple" fixed point) so fractional LMULs stay
    // integral; the shift by Log2SEW performs the division by sew.
    defvar vlmul = vti.LMul;
    defvar octuple_lmul = vlmul.octuple;
    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
    // Only emit patterns when EMUL lands in the representable range
    // [1/8, 8], i.e. octuple values 1..64.
    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
      defvar emul_str = octuple_to_str<octuple_emul>.ret;
      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
      // Both the data type and the index type must be legal on the target.
      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                   GetVTypePredicates<ivti>.Predicates) in
      defm : VPatBinary<intrinsic, inst,
                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, ivti.RegClass>;
    }
  }
}
5115
// Vector-scalar patterns using vti's scalar type (GPR or FPR, selected by
// ScalarSuffix). isSEWAware appends "_E<SEW>" to the pseudo name.
multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    // "VX" for integer scalars, "VF" for FP scalars.
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic,
                      !if(isSEWAware,
                          instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                          instruction#"_"#kind#"_"#vti.LMul.MX),
                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}
5130
// Same as VPatBinaryV_VX, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryV_VX_RM<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    // "VX" for integer scalars, "VF" for FP scalars.
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryRoundingMode<intrinsic,
                                  !if(isSEWAware,
                                      instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                                      instruction#"_"#kind#"_"#vti.LMul.MX),
                                  vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  vti.RegClass, vti.ScalarRegClass>;
  }
}
5145
// Vector-scalar patterns with an XLenVT GPR scalar operand regardless of the
// vector element type (always the "_VX_" pseudo form).
multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, GPR>;
}
5155
// Vector-immediate patterns; the immediate operand class (e.g. simm5/uimm5)
// is supplied by the caller via imm_type.
multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, Operand imm_type> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, imm_type>;
}
5165
// Same as VPatBinaryV_VI, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryV_VI_RM<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist,
                             Operand imm_type> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryRoundingMode<intrinsic,
                                  instruction # "_VI_" # vti.LMul.MX,
                                  vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  vti.RegClass, imm_type>;
}
5177
// Mask-mask patterns (mask-register logical ops) for every mask type.
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
  foreach mti = AllMasks in
    let Predicates = [HasVInstructions] in
    def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
                      mti.Mask, mti.Mask, mti.Mask,
                      mti.Log2SEW, VR, VR>;
}
5185
// Widening vector-vector patterns: both sources are narrow (Vti), the result
// is the corresponding widened type (Wti). Requires predicates of both types.
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, Vti.RegClass>;
  }
}
5199
// Same as VPatBinaryW_VV, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryW_VV_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
                                  Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                                  Vti.Log2SEW, Wti.RegClass,
                                  Vti.RegClass, Vti.RegClass>;
  }
}
5213
// Widening vector-scalar patterns: narrow vector op narrow scalar, widened
// result (Wti).
multiclass VPatBinaryW_VX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    // "VX" for integer scalars, "VF" for FP scalars.
    defvar kind = "V"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, Vti.ScalarRegClass>;
  }
}
5228
// Same as VPatBinaryW_VX, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryW_VX_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    // "VX" for integer scalars, "VF" for FP scalars.
    defvar kind = "V"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                                  Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                                  Vti.Log2SEW, Wti.RegClass,
                                  Vti.RegClass, Vti.ScalarRegClass>;
  }
}
5243
// Widening "WV" patterns: wide first source, narrow second source, wide
// result. Emits tied (op0 == dest) and untied variants; the tied TU/mask
// forms get AddedComplexity = 1 so they are preferred when they apply.
multiclass VPatBinaryW_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in {
      def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                 Wti.Vector, Vti.Vector,
                                 Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      def : VPatBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
                               Wti.RegClass, Wti.RegClass, Vti.RegClass>;
      // Prefer the tied forms over the generic ones when both could match.
      let AddedComplexity = 1 in {
      def : VPatTiedBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                   Wti.Vector, Vti.Vector,
                                   Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Vti.Vector, Vti.Mask,
                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      }
      def : VPatBinaryMaskTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                             Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                             Vti.Log2SEW, Wti.RegClass,
                             Wti.RegClass, Vti.RegClass>;
    }
  }
}
5272
// Rounding-mode variant of VPatBinaryW_WV; same tied/untied structure and
// AddedComplexity preference for the tied TU/mask forms.
multiclass VPatBinaryW_WV_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in {
      def : VPatTiedBinaryNoMaskRoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                             Wti.Vector, Vti.Vector,
                                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      def : VPatBinaryNoMaskTURoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
                                           Wti.RegClass, Wti.RegClass, Vti.RegClass>;
      // Prefer the tied forms over the generic ones when both could match.
      let AddedComplexity = 1 in {
      def : VPatTiedBinaryNoMaskTURoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                               Wti.Vector, Vti.Vector,
                                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      def : VPatTiedBinaryMaskRoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                           Wti.Vector, Vti.Vector, Vti.Mask,
                                           Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      }
      def : VPatBinaryMaskTARoundingMode<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                         Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                                         Vti.Log2SEW, Wti.RegClass,
                                         Wti.RegClass, Vti.RegClass>;
    }
  }
}
5301
// Widening "WX"/"WF" patterns: wide vector first source, narrow scalar second
// source, wide result.
multiclass VPatBinaryW_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    // "WX" for integer scalars, "WF" for FP scalars.
    defvar kind = "W"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Wti.RegClass, Vti.ScalarRegClass>;
  }
}
5316
// Same as VPatBinaryW_WX, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryW_WX_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    // "WX" for integer scalars, "WF" for FP scalars.
    defvar kind = "W"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                                  Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                                  Vti.Log2SEW, Wti.RegClass,
                                  Wti.RegClass, Vti.ScalarRegClass>;
  }
}
5331
// Narrowing "WV" patterns: wide vector first source, narrow vector second
// source, narrow (Vti) result.
multiclass VPatBinaryV_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                      Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                      Vti.Log2SEW, Vti.RegClass,
                      Wti.RegClass, Vti.RegClass>;
  }
}
5345
// Same as VPatBinaryV_WV, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryV_WV_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic,
                                  instruction # "_WV_" # Vti.LMul.MX,
                                  Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                                  Vti.Log2SEW, Vti.RegClass,
                                  Wti.RegClass, Vti.RegClass>;
  }
}
5360
// Narrowing "WX"/"WF" patterns: wide vector first source, narrow scalar
// second source, narrow (Vti) result.
multiclass VPatBinaryV_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    // "WX" for integer scalars, "WF" for FP scalars.
    defvar kind = "W"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                      Vti.Log2SEW, Vti.RegClass,
                      Wti.RegClass, Vti.ScalarRegClass>;
  }
}
5375
// Same as VPatBinaryV_WX, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryV_WX_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    // "WX" for integer scalars, "WF" for FP scalars.
    defvar kind = "W"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic,
                                  instruction#"_"#kind#"_"#Vti.LMul.MX,
                                  Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                                  Vti.Log2SEW, Vti.RegClass,
                                  Wti.RegClass, Vti.ScalarRegClass>;
  }
}
5391
5392
// Narrowing "WI" patterns: wide vector first source, 5-bit unsigned immediate
// second source, narrow (Vti) result.
multiclass VPatBinaryV_WI<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
                      Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
                      Vti.Log2SEW, Vti.RegClass,
                      Wti.RegClass, uimm5>;
  }
}
5406
// Same as VPatBinaryV_WI, but for pseudos that take a rounding-mode operand.
multiclass VPatBinaryV_WI_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic,
                                  instruction # "_WI_" # Vti.LMul.MX,
                                  Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
                                  Vti.Log2SEW, Vti.RegClass,
                                  Wti.RegClass, uimm5>;
  }
}
5421
// Carry-in vector-vector patterns ("VVM" form, e.g. with v0 carry/borrow).
// With CarryOut = 1 the result is a mask (vti.Mask) instead of a vector.
multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}
5433
// Carry-in vector-scalar patterns ("VXM"/"VFM" form). With CarryOut = 1 the
// result is a mask (vti.Mask) instead of a vector.
multiclass VPatBinaryV_XM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryIn<intrinsic, instruction,
                             "V"#vti.ScalarSuffix#"M",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Scalar, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.ScalarRegClass>;
}
5446
// Carry-in vector-immediate patterns ("VIM" form, simm5 immediate). With
// CarryOut = 1 the result is a mask (vti.Mask) instead of a vector.
multiclass VPatBinaryV_IM<string intrinsic, string instruction,
                          bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, XLenVT, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
5457
// Carry-in vector-vector patterns using the TAIL variant (carries an extra
// passthru/tail operand via VPatBinaryCarryInTAIL).
multiclass VPatBinaryV_VM_TAIL<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VVM",
                                 vti.Vector,
                                 vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}
5467
// Carry-in vector-scalar patterns ("VXM"/"VFM"), TAIL (passthru) variant.
multiclass VPatBinaryV_XM_TAIL<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix#"M",
                                 vti.Vector,
                                 vti.Vector, vti.Scalar, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.ScalarRegClass>;
}
5478
// Carry-in vector-immediate patterns ("VIM", simm5), TAIL (passthru) variant.
multiclass VPatBinaryV_IM_TAIL<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VIM",
                                 vti.Vector,
                                 vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul,
                                 vti.RegClass, vti.RegClass, simm5>;
}
5488
// Mask-producing vector-vector patterns ("VV" form): two vector sources,
// mask result, no carry-in (e.g. madc/msbc-style ops without v0).
multiclass VPatBinaryV_V<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
                             vti.Mask, vti.Vector, vti.Vector,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}
5497
// Mask-producing vector-scalar patterns ("VX" form, GPR scalar).
multiclass VPatBinaryV_X<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, GPR>;
}
5506
// Mask-producing vector-immediate patterns ("VI" form, simm5 immediate).
multiclass VPatBinaryV_I<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
5515
// Mask-result comparison patterns ("VV"): two vector sources, mask result
// (result register class VR).
multiclass VPatBinaryM_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryM<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.RegClass>;
}
5525
// Like VPatBinaryM_VV, but matches with the two vector operands swapped
// (via VPatBinarySwapped), e.g. to map a "gt" intrinsic onto a "lt" pseudo.
multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, VR,
                             vti.RegClass, vti.RegClass>;
}
5535
// Mask-result comparison patterns with a scalar second operand ("VX"/"VF").
multiclass VPatBinaryM_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // "VX" for integer scalars, "VF" for FP scalars.
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryM<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.ScalarRegClass>;
  }
}
5547
// Mask-result comparison patterns with a simm5 immediate second operand.
multiclass VPatBinaryM_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryM<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, XLenVT, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, simm5>;
}
5557
// Convenience multiclasses combining the vector-vector / vector-scalar /
// vector-immediate operand-kind variants for one intrinsic.

// VV + VX + VI (immediate operand class selectable, default simm5).
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

// VV + VX + VI, rounding-mode variants.
multiclass VPatBinaryV_VV_VX_VI_RM<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI_RM<intrinsic, instruction, vtilist, ImmType>;

// VV + VX, optionally SEW-aware pseudo names.
multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist, bit isSEWAware = 0>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist, isSEWAware>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist, isSEWAware>;

// VV + VX, rounding-mode variants, optionally SEW-aware.
multiclass VPatBinaryV_VV_VX_RM<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, bit isSEWAware = 0>
    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;

// VX + VI only (no VV form).
multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
5584
// Convenience multiclasses combining operand-kind variants for widening (W_*)
// and narrowing (V_W*) instructions.

// Widening: VV + VX.
multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX<intrinsic, instruction, vtilist>;

// Widening: VV + VX, rounding-mode variants.
multiclass VPatBinaryW_VV_VX_RM<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV_RM<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX_RM<intrinsic, instruction, vtilist>;

// Widening with wide first operand: WV + WX.
multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
      VPatBinaryW_WX<intrinsic, instruction, vtilist>;

// Widening with wide first operand: WV + WX, rounding-mode variants.
multiclass VPatBinaryW_WV_WX_RM<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_WV_RM<intrinsic, instruction, vtilist>,
      VPatBinaryW_WX_RM<intrinsic, instruction, vtilist>;

// Narrowing: WV + WX + WI.
multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI<intrinsic, instruction, vtilist>;

// Narrowing: WV + WX + WI, rounding-mode variants.
multiclass VPatBinaryV_WV_WX_WI_RM<string intrinsic, string instruction,
                                   list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI_RM<intrinsic, instruction, vtilist>;
5616
// Convenience multiclasses combining carry-in and mask-out operand-kind
// variants for one intrinsic.

// Carry-in, vector result, TAIL (passthru) forms: VVM + VXM + VIM.
multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>,
      VPatBinaryV_IM_TAIL<intrinsic, instruction>;

// Carry-in, mask result (CarryOut=1): VVM + VXM + VIM.
multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>,
      VPatBinaryV_IM<intrinsic, instruction, CarryOut=1>;

// No carry-in, mask result: VV + VX + VI.
multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>,
      VPatBinaryV_I<intrinsic, instruction>;

// Carry-in, vector result, TAIL forms, no immediate variant: VVM + VXM.
multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>;

// Carry-in, mask result, no immediate variant: VVM + VXM.
multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>;

// No carry-in, mask result, no immediate variant: VV + VX.
multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>;
5643
// Base ternary patterns: emits the unmasked (VPatTernaryNoMask) and masked
// (VPatTernaryMask) pattern for one intrinsic/pseudo pair.
multiclass VPatTernary<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       RegisterClass op1_reg_class,
                       DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                          sew, vlmul, result_reg_class, op1_reg_class,
                          op2_kind>;
  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                        op2_kind>;
}
5663
// Ternary patterns where the unmasked form carries no policy operand but the
// masked form does (VPatTernaryMaskPolicy).
multiclass VPatTernaryNoMaskNoPolicy<string intrinsic,
                                     string inst,
                                     string kind,
                                     ValueType result_type,
                                     ValueType op1_type,
                                     ValueType op2_type,
                                     ValueType mask_type,
                                     int sew,
                                     LMULInfo vlmul,
                                     VReg result_reg_class,
                                     RegisterClass op1_reg_class,
                                     DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                          sew, vlmul, result_reg_class, op1_reg_class,
                          op2_kind>;
  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                              op2_kind>;
}
5683
// Ternary patterns where both the unmasked (VPatTernaryNoMaskWithPolicy) and
// masked (VPatTernaryMaskPolicy) forms carry a policy operand.
multiclass VPatTernaryWithPolicy<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 RegisterClass op1_reg_class,
                                 DAGOperand op2_kind> {
  def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
                                    op2_type, sew, vlmul, result_reg_class,
                                    op1_reg_class, op2_kind>;
  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                              op2_kind>;
}
5703
// Rounding-mode variant of VPatTernaryWithPolicy: both the unmasked and
// masked forms carry policy and rounding-mode operands.
multiclass VPatTernaryWithPolicyRoundingMode<string intrinsic,
                                             string inst,
                                             string kind,
                                             ValueType result_type,
                                             ValueType op1_type,
                                             ValueType op2_type,
                                             ValueType mask_type,
                                             int sew,
                                             LMULInfo vlmul,
                                             VReg result_reg_class,
                                             RegisterClass op1_reg_class,
                                             DAGOperand op2_kind> {
  def : VPatTernaryNoMaskWithPolicyRoundingMode<intrinsic, inst, kind, result_type,
                                                op1_type, op2_type, sew, vlmul,
                                                result_reg_class, op1_reg_class,
                                                op2_kind>;
  def : VPatTernaryMaskPolicyRoundingMode<intrinsic, inst, kind, result_type, op1_type,
                                          op2_type, mask_type, sew, vlmul,
                                          result_reg_class, op1_reg_class,
                                          op2_kind>;
}
5725
// Ternary patterns using the tail-agnostic (TA) unmasked/masked forms; SEW is
// passed as log2sew here, matching the TA pattern classes.
multiclass VPatTernaryTA<string intrinsic,
                         string inst,
                         string kind,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int log2sew,
                         LMULInfo vlmul,
                         VReg result_reg_class,
                         RegisterClass op1_reg_class,
                         DAGOperand op2_kind> {
  def : VPatTernaryNoMaskTA<intrinsic, inst, kind, result_type, op1_type,
                            op2_type, log2sew, vlmul, result_reg_class,
                            op1_reg_class, op2_kind>;
  def : VPatTernaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
                          op2_type, mask_type, log2sew, vlmul,
                          result_reg_class, op1_reg_class, op2_kind>;
}
5745
/// Same as VPatTernaryTA, but for pseudos that carry an explicit
/// rounding-mode operand.
multiclass VPatTernaryTARoundingMode<string intrinsic,
                                     string inst,
                                     string kind,
                                     ValueType result_type,
                                     ValueType op1_type,
                                     ValueType op2_type,
                                     ValueType mask_type,
                                     int log2sew,
                                     LMULInfo vlmul,
                                     VReg result_reg_class,
                                     RegisterClass op1_reg_class,
                                     DAGOperand op2_kind> {
  def : VPatTernaryNoMaskTARoundingMode<intrinsic, inst, kind, result_type,
                                        op1_type, op2_type, log2sew, vlmul,
                                        result_reg_class, op1_reg_class,
                                        op2_kind>;
  def : VPatTernaryMaskTARoundingMode<intrinsic, inst, kind, result_type,
                                      op1_type, op2_type, mask_type, log2sew,
                                      vlmul, result_reg_class, op1_reg_class,
                                      op2_kind>;
}
5765
/// VV-form ternary patterns (result, op1 and op2 are all the same vector
/// type) for every type in vtilist.
multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}
5775
/// Rounding-mode variant of VPatTernaryV_VV_AAXA.
multiclass VPatTernaryV_VV_AAXA_RM<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
                                             vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                             vti.Log2SEW, vti.LMul, vti.RegClass,
                                             vti.RegClass, vti.RegClass>;
}
5785
/// VX-form ternary patterns where op2 is an XLen scalar in a GPR (e.g. a
/// shift/index amount rather than a vector element).
multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, GPR>;
}
5795
/// Scalar-operand ternary patterns: op1 is vti.Scalar in vti.ScalarRegClass,
/// and the instruction suffix ("VX"/"VF") is picked via vti.ScalarSuffix.
multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.ScalarRegClass, vti.RegClass>;
}
5806
/// Rounding-mode variant of VPatTernaryV_VX_AAXA.
multiclass VPatTernaryV_VX_AAXA_RM<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction,
                                             "V"#vti.ScalarSuffix,
                                             vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                                             vti.Log2SEW, vti.LMul, vti.RegClass,
                                             vti.ScalarRegClass, vti.RegClass>;
}
5817
/// VI-form ternary patterns where op2 is an immediate of the given Imm_type
/// (typed as XLenVT in the pattern).
multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand Imm_type> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, Imm_type>;
}
5827
/// Widening VV-form ternary patterns: the result/accumulator uses the wide
/// type (wti) while both sources use the narrow type (vti). Requires the
/// predicates of both the narrow and the wide type.
multiclass VPatTernaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 wti.Vector, vti.Vector, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}
5841
/// Rounding-mode variant of VPatTernaryW_VV.
multiclass VPatTernaryW_VV_RM<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
                                             wti.Vector, vti.Vector, vti.Vector,
                                             vti.Mask, vti.Log2SEW, vti.LMul,
                                             wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}
5855
/// Widening scalar-operand ternary patterns: wide result, narrow scalar op1
/// (suffix chosen by vti.ScalarSuffix) and narrow vector op2.
multiclass VPatTernaryW_VX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 wti.Vector, vti.Scalar, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
  }
}
5870
/// Rounding-mode variant of VPatTernaryW_VX.
multiclass VPatTernaryW_VX_RM<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction,
                                             "V"#vti.ScalarSuffix,
                                             wti.Vector, vti.Scalar, vti.Vector,
                                             vti.Mask, vti.Log2SEW, vti.LMul,
                                             wti.RegClass, vti.ScalarRegClass,
                                             vti.RegClass>;
  }
}
5886
/// Combined VV + VX/VF ternary patterns.
multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
    : VPatTernaryV_VV_AAXA<intrinsic, instruction, vtilist>,
      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;
5891
/// Combined VV + VX/VF ternary patterns, rounding-mode variant.
multiclass VPatTernaryV_VV_VX_AAXA_RM<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist>
    : VPatTernaryV_VV_AAXA_RM<intrinsic, instruction, vtilist>,
      VPatTernaryV_VX_AAXA_RM<intrinsic, instruction, vtilist>;
5896
/// Combined VX + VI ternary patterns (immediate type defaults to simm5).
multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist, Operand Imm_type = simm5>
    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
5901
5902
/// Combined VV + VX + VI mask-producing binary patterns.
multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5908
/// Combined widening VV + VX ternary patterns.
multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist>
    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
      VPatTernaryW_VX<intrinsic, instruction, vtilist>;
5913
/// Combined widening VV + VX ternary patterns, rounding-mode variant.
multiclass VPatTernaryW_VV_VX_RM<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist>
    : VPatTernaryW_VV_RM<intrinsic, instruction, vtilist>,
      VPatTernaryW_VX_RM<intrinsic, instruction, vtilist>;
5918
/// Combined VV + VX mask-producing binary patterns (no immediate form).
multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>;
5923
/// Combined VX + VI mask-producing binary patterns (no vector-vector form).
multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5928
/// Combined VV + VX + VI patterns for intrinsics with integer-typed second
/// operands, selecting per-form intrinsic names via the "_vv"/"_vx" suffix.
/// NOTE(review): the VI form is keyed off the "_vx" intrinsic — presumably
/// the immediate variant is selected from the same scalar intrinsic; confirm
/// against the instantiation sites.
multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
5934
/// Single-width reduction patterns: the scalar accumulator/result lives in an
/// M1-sized vector. For non-grouped (LMUL <= 1) types the matching M1 type is
/// looked up by record name ("VI<sew>M1" / "VF<sew>M1"); grouped types use
/// the precomputed gvti.VectorM1.
multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryTA<intrinsic, instruction, "VS",
                         vectorM1.Vector, vti.Vector,
                         vectorM1.Vector, vti.Mask,
                         vti.Log2SEW, vti.LMul,
                         VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
    let Predicates = GetVTypePredicates<gvti>.Predicates in
    defm : VPatTernaryTA<intrinsic, instruction, "VS",
                         gvti.VectorM1, gvti.Vector,
                         gvti.VectorM1, gvti.Mask,
                         gvti.Log2SEW, gvti.LMul,
                         VR, gvti.RegClass, VR>;
  }
}
5954
/// Rounding-mode variant of VPatReductionV_VS.
multiclass VPatReductionV_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryTARoundingMode<intrinsic, instruction, "VS",
                                     vectorM1.Vector, vti.Vector,
                                     vectorM1.Vector, vti.Mask,
                                     vti.Log2SEW, vti.LMul,
                                     VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
    let Predicates = GetVTypePredicates<gvti>.Predicates in
    defm : VPatTernaryTARoundingMode<intrinsic, instruction, "VS",
                                     gvti.VectorM1, gvti.Vector,
                                     gvti.VectorM1, gvti.Mask,
                                     gvti.Log2SEW, gvti.LMul,
                                     VR, gvti.RegClass, VR>;
  }
}
5974
/// Widening reduction patterns: the accumulator element type is 2*SEW. The
/// !le(wtiSEW, 64) guard skips source types whose widened SEW would exceed
/// the maximum element width of 64.
multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      let Predicates = GetVTypePredicates<vti>.Predicates in
      defm : VPatTernaryTA<intrinsic, instruction, "VS",
                           wtiM1.Vector, vti.Vector,
                           wtiM1.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul,
                           wtiM1.RegClass, vti.RegClass,
                           wtiM1.RegClass>;
    }
  }
}
5990
/// Rounding-mode variant of VPatReductionW_VS.
multiclass VPatReductionW_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      let Predicates = GetVTypePredicates<vti>.Predicates in
      defm : VPatTernaryTARoundingMode<intrinsic, instruction, "VS",
                                       wtiM1.Vector, vti.Vector,
                                       wtiM1.Vector, vti.Mask,
                                       vti.Log2SEW, vti.LMul,
                                       wtiM1.RegClass, vti.RegClass,
                                       wtiM1.RegClass>;
    }
  }
}
6006
/// Float-to-integer conversion patterns of the same element width (ivti is
/// the integer type matching fvti).
multiclass VPatConversionVI_VF<string intrinsic,
                               string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    defm : VPatConversionTA<intrinsic, instruction, "V",
                            ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                            fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}
6018
/// Rounding-mode variant of VPatConversionVI_VF.
multiclass VPatConversionVI_VF_RM<string intrinsic,
                                  string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    defm : VPatConversionTARoundingMode<intrinsic, instruction, "V",
                                        ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                                        fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}
6030
/// Integer-to-float conversion patterns of the same element width, with a
/// rounding-mode operand.
multiclass VPatConversionVF_VI_RM<string intrinsic,
                                  string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    defm : VPatConversionTARoundingMode<intrinsic, instruction, "V",
                                        fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
                                        ivti.LMul, fvti.RegClass, ivti.RegClass>;
  }
}
6042
/// Widening float-to-integer conversions: narrow float source, wide integer
/// result (iwti is the integer type matching the widened float type).
multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    defm : VPatConversionTA<intrinsic, instruction, "V",
                            iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                            fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}
6054
/// Rounding-mode variant of VPatConversionWI_VF.
multiclass VPatConversionWI_VF_RM<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    defm : VPatConversionTARoundingMode<intrinsic, instruction, "V",
                                        iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                                        fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}
6066
/// Widening integer-to-float conversions: narrow integer source, wide float
/// result.
multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
                            vti.LMul, fwti.RegClass, vti.RegClass>;
  }
}
6078
/// Widening float-to-float conversions: narrow float source, wide float
/// result.
multiclass VPatConversionWF_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionTA<intrinsic, instruction, "V",
                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
                            fvti.LMul, fwti.RegClass, fvti.RegClass>;
  }
}
6090
/// Narrowing float-to-integer conversions (kind "W"): wide float source,
/// narrow integer result.
multiclass VPatConversionVI_WF <string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionTA<intrinsic, instruction, "W",
                            vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                            vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}
6102
/// Rounding-mode variant of VPatConversionVI_WF.
multiclass VPatConversionVI_WF_RM <string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionTARoundingMode<intrinsic, instruction, "W",
                                        vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                                        vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}
6114
/// Narrowing integer-to-float conversions (kind "W"): wide integer source,
/// narrow float result, with a rounding-mode operand.
multiclass VPatConversionVF_WI_RM <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    defm : VPatConversionTARoundingMode<intrinsic, instruction, "W",
                                        fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
                                        fvti.LMul, fvti.RegClass, iwti.RegClass>;
  }
}
6126
/// Narrowing float-to-float conversions (kind "W"): wide float source,
/// narrow float result.
multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionTA<intrinsic, instruction, "W",
                            fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                            fvti.LMul, fvti.RegClass, fwti.RegClass>;
  }
}
6138
/// Rounding-mode variant of VPatConversionVF_WF.
multiclass VPatConversionVF_WF_RM <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionTARoundingMode<intrinsic, instruction, "W",
                                        fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                                        fvti.LMul, fvti.RegClass, fwti.RegClass>;
  }
}
6150
/// Select integer compare intrinsics with an ImmType immediate to the "_VI"
/// pseudo with the immediate decremented (DecImm), for both the unmasked and
/// masked intrinsic forms.
/// NOTE(review): presumably this rewrites e.g. a strict compare against imm
/// into the adjacent non-strict compare against imm-1 — confirm at the
/// instantiation sites of this multiclass.
multiclass VPatCompare_VI<string intrinsic, string inst,
                          ImmLeaf ImmType> {
  foreach vti = AllIntegerVectors in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
                              (vti.Scalar ImmType:$rs2),
                              VLOpFrag)),
              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                      GPR:$vl, vti.Log2SEW)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
                                  (vti.Vector vti.RegClass:$rs1),
                                  (vti.Scalar ImmType:$rs2),
                                  (vti.Mask V0),
                                  VLOpFrag)),
              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                          (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}
6174
6175//===----------------------------------------------------------------------===//
6176// Pseudo instructions
6177//===----------------------------------------------------------------------===//
6178
6179let Predicates = [HasVInstructions] in {
6180
6181//===----------------------------------------------------------------------===//
6182// Pseudo Instructions for CodeGen
6183//===----------------------------------------------------------------------===//
6184
// Read the VLENB CSR into a GPR; expands to "csrrs $rd, vlenb, x0".
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
                               [(set GPR:$rd, (riscv_read_vlenb))]>,
                        PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVLENB.Encoding, X0)>,
                        Sched<[WriteRdVLENB]>;
}
6191
// Read the VL CSR into a GPR; expands to "csrrs $rd, vl, x0". VL is modeled
// as a use so the read is ordered after instructions that change VL.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
    Uses = [VL] in
def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>,
                   PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVL.Encoding, X0)>;
6196
// Frame-index spill/reload pseudos for tuples of nf vector register groups.
// NOTE(review): Size = 4 * (2*nf - 1) presumably budgets nf memory ops plus
// nf-1 address increments at 4 bytes each — confirm against the pass that
// expands these pseudos.
foreach lmul = MxList in {
  foreach nf = NFSet<lmul>.L in {
    defvar vreg = SegRegClass<lmul, nf>.RC;
    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVSPILL" # nf # "_" # lmul.MX :
        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2), []>;
    }
    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2), []>;
    }
  }
}
6212
/// Empty pseudo for RISCVInitUndefPass
// One zero-size placeholder per register-group width (M1/M2/M4/M8) that
// defines a vector register without emitting any machine code (Size = 0,
// empty asm string).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 0,
    isCodeGenOnly = 1 in {
  def PseudoRVVInitUndefM1 : Pseudo<(outs VR:$vd), (ins), [], "">;
  def PseudoRVVInitUndefM2 : Pseudo<(outs VRM2:$vd), (ins), [], "">;
  def PseudoRVVInitUndefM4 : Pseudo<(outs VRM4:$vd), (ins), [], "">;
  def PseudoRVVInitUndefM8 : Pseudo<(outs VRM8:$vd), (ins), [], "">;
}
6221
6222//===----------------------------------------------------------------------===//
6223// 6. Configuration-Setting Instructions
6224//===----------------------------------------------------------------------===//
6225
6226// Pseudos.
6227let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
6228// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
6229// the when we aren't using one of the special X0 encodings. Otherwise it could
6230// be accidentally be made X0 by MachineIR optimizations. To satisfy the
6231// verifier, we also need a GPRX0 instruction for the special encodings.
6232def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>,
6233                    Sched<[WriteVSETVLI, ReadVSETVLI]>;
6234def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>,
6235                      Sched<[WriteVSETVLI, ReadVSETVLI]>;
6236def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>,
6237                     Sched<[WriteVSETIVLI]>;
6238}
6239
6240//===----------------------------------------------------------------------===//
6241// 7. Vector Loads and Stores
6242//===----------------------------------------------------------------------===//
6243
6244//===----------------------------------------------------------------------===//
6245// 7.4 Vector Unit-Stride Instructions
6246//===----------------------------------------------------------------------===//
6247
6248// Pseudos Unit-Stride Loads and Stores
6249defm PseudoVL : VPseudoUSLoad;
6250defm PseudoVS : VPseudoUSStore;
6251
6252defm PseudoVLM : VPseudoLoadMask;
6253defm PseudoVSM : VPseudoStoreMask;
6254
6255//===----------------------------------------------------------------------===//
6256// 7.5 Vector Strided Instructions
6257//===----------------------------------------------------------------------===//
6258
6259// Vector Strided Loads and Stores
6260defm PseudoVLS : VPseudoSLoad;
6261defm PseudoVSS : VPseudoSStore;
6262
6263//===----------------------------------------------------------------------===//
6264// 7.6 Vector Indexed Instructions
6265//===----------------------------------------------------------------------===//
6266
6267// Vector Indexed Loads and Stores
6268defm PseudoVLUX : VPseudoILoad<Ordered=false>;
6269defm PseudoVLOX : VPseudoILoad<Ordered=true>;
6270defm PseudoVSOX : VPseudoIStore<Ordered=true>;
6271defm PseudoVSUX : VPseudoIStore<Ordered=false>;
6272
6273//===----------------------------------------------------------------------===//
6274// 7.7. Unit-stride Fault-Only-First Loads
6275//===----------------------------------------------------------------------===//
6276
6277// vleff may update VL register
6278let hasSideEffects = 1, Defs = [VL] in
6279defm PseudoVL : VPseudoFFLoad;
6280
6281//===----------------------------------------------------------------------===//
6282// 7.8. Vector Load/Store Segment Instructions
6283//===----------------------------------------------------------------------===//
6284defm PseudoVLSEG : VPseudoUSSegLoad;
6285defm PseudoVLSSEG : VPseudoSSegLoad;
6286defm PseudoVLOXSEG : VPseudoISegLoad<Ordered=true>;
6287defm PseudoVLUXSEG : VPseudoISegLoad<Ordered=false>;
6288defm PseudoVSSEG : VPseudoUSSegStore;
6289defm PseudoVSSSEG : VPseudoSSegStore;
6290defm PseudoVSOXSEG : VPseudoISegStore<Ordered=true>;
6291defm PseudoVSUXSEG : VPseudoISegStore<Ordered=false>;
6292
6293// vlseg<nf>e<eew>ff.v may update VL register
6294let hasSideEffects = 1, Defs = [VL] in {
6295defm PseudoVLSEG : VPseudoUSSegLoadFF;
6296}
6297
6298//===----------------------------------------------------------------------===//
6299// 11. Vector Integer Arithmetic Instructions
6300//===----------------------------------------------------------------------===//
6301
6302//===----------------------------------------------------------------------===//
6303// 11.1. Vector Single-Width Integer Add and Subtract
6304//===----------------------------------------------------------------------===//
6305defm PseudoVADD   : VPseudoVALU_VV_VX_VI;
6306defm PseudoVSUB   : VPseudoVALU_VV_VX;
6307defm PseudoVRSUB  : VPseudoVALU_VX_VI;
6308
6309foreach vti = AllIntegerVectors in {
6310  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
  // occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
6312  // to use a more complex splat sequence. Add the pattern for all VTs for
6313  // consistency.
6314  let Predicates = GetVTypePredicates<vti>.Predicates in {
6315    def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
6316                                           (vti.Vector vti.RegClass:$rs2),
6317                                           (vti.Vector vti.RegClass:$rs1),
6318                                           VLOpFrag)),
6319              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX)
6320                                                        vti.RegClass:$merge,
6321                                                        vti.RegClass:$rs1,
6322                                                        vti.RegClass:$rs2,
6323                                                        GPR:$vl,
6324                                                        vti.Log2SEW, TU_MU)>;
6325    def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
6326                                                (vti.Vector vti.RegClass:$rs2),
6327                                                (vti.Vector vti.RegClass:$rs1),
6328                                                (vti.Mask V0),
6329                                                VLOpFrag,
6330                                                (XLenVT timm:$policy))),
6331              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
6332                                                        vti.RegClass:$merge,
6333                                                        vti.RegClass:$rs1,
6334                                                        vti.RegClass:$rs2,
6335                                                        (vti.Mask V0),
6336                                                        GPR:$vl,
6337                                                        vti.Log2SEW,
6338                                                        (XLenVT timm:$policy))>;
6339
6340    // Match VSUB with a small immediate to vadd.vi by negating the immediate.
6341    def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
6342                                          (vti.Vector vti.RegClass:$rs1),
6343                                          (vti.Scalar simm5_plus1:$rs2),
6344                                          VLOpFrag)),
6345              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)),
6346                                                                vti.RegClass:$rs1,
6347                                                                (NegImm simm5_plus1:$rs2),
6348                                                                GPR:$vl,
6349                                                                vti.Log2SEW, TU_MU)>;
6350    def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
6351                                               (vti.Vector vti.RegClass:$rs1),
6352                                               (vti.Scalar simm5_plus1:$rs2),
6353                                               (vti.Mask V0),
6354                                               VLOpFrag,
6355                                               (XLenVT timm:$policy))),
6356              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
6357                                                        vti.RegClass:$merge,
6358                                                        vti.RegClass:$rs1,
6359                                                        (NegImm simm5_plus1:$rs2),
6360                                                        (vti.Mask V0),
6361                                                        GPR:$vl,
6362                                                        vti.Log2SEW,
6363                                                        (XLenVT timm:$policy))>;
6364  }
6365}
6366
//===----------------------------------------------------------------------===//
// 11.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
// VV/VX forms: both sources are SEW wide, result is 2*SEW wide.
defm PseudoVWADDU : VPseudoVWALU_VV_VX;
defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
defm PseudoVWADD  : VPseudoVWALU_VV_VX;
defm PseudoVWSUB  : VPseudoVWALU_VV_VX;
// WV/WX (.w) forms: the first source is already 2*SEW wide.
defm PseudoVWADDU : VPseudoVWALU_WV_WX;
defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
defm PseudoVWADD  : VPseudoVWALU_WV_WX;
defm PseudoVWSUB  : VPseudoVWALU_WV_WX;
6378
//===----------------------------------------------------------------------===//
// 11.3. Vector Integer Extension
//===----------------------------------------------------------------------===//
// VF2/VF4/VF8 extend elements from SEW/2, SEW/4, SEW/8 respectively up to
// SEW, either zero-extending (VZEXT) or sign-extending (VSEXT).
defm PseudoVZEXT_VF2 : PseudoVEXT_VF2;
defm PseudoVZEXT_VF4 : PseudoVEXT_VF4;
defm PseudoVZEXT_VF8 : PseudoVEXT_VF8;
defm PseudoVSEXT_VF2 : PseudoVEXT_VF2;
defm PseudoVSEXT_VF4 : PseudoVEXT_VF4;
defm PseudoVSEXT_VF8 : PseudoVEXT_VF8;
6388
//===----------------------------------------------------------------------===//
// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
//===----------------------------------------------------------------------===//
defm PseudoVADC  : VPseudoVCALU_VM_XM_IM;
// The mask-producing carry-out/borrow-out forms (VMADC/VMSBC) use
// @earlyclobber so the mask result cannot be allocated on top of a source
// vector register group.
defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd">;
defm PseudoVMADC : VPseudoVCALUM_V_X_I<"@earlyclobber $rd">;

defm PseudoVSBC  : VPseudoVCALU_VM_XM;
defm PseudoVMSBC : VPseudoVCALUM_VM_XM<"@earlyclobber $rd">;
defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd">;
6399
//===----------------------------------------------------------------------===//
// 11.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
defm PseudoVAND : VPseudoVALU_VV_VX_VI;
defm PseudoVOR  : VPseudoVALU_VV_VX_VI;
defm PseudoVXOR : VPseudoVALU_VV_VX_VI;

//===----------------------------------------------------------------------===//
// 11.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
// Immediate shift amounts are unsigned 5-bit (uimm5).
defm PseudoVSLL : VPseudoVSHT_VV_VX_VI<uimm5>;
defm PseudoVSRL : VPseudoVSHT_VV_VX_VI<uimm5>;
defm PseudoVSRA : VPseudoVSHT_VV_VX_VI<uimm5>;

//===----------------------------------------------------------------------===//
// 11.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI;
defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;

//===----------------------------------------------------------------------===//
// 11.8. Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
// Form asymmetry mirrors the ISA: vmslt[u] has no immediate form, and
// vmsgt[u] has no vector-vector form.
defm PseudoVMSEQ  : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSNE  : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
defm PseudoVMSLT  : VPseudoVCMPM_VV_VX;
defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSLE  : VPseudoVCMPM_VV_VX_VI;
defm PseudoVMSGTU : VPseudoVCMPM_VX_VI;
defm PseudoVMSGT  : VPseudoVCMPM_VX_VI;

//===----------------------------------------------------------------------===//
// 11.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMINU : VPseudoVMINMAX_VV_VX;
defm PseudoVMIN  : VPseudoVMINMAX_VV_VX;
defm PseudoVMAXU : VPseudoVMINMAX_VV_VX;
defm PseudoVMAX  : VPseudoVMINMAX_VV_VX;

//===----------------------------------------------------------------------===//
// 11.10. Vector Single-Width Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMUL    : VPseudoVMUL_VV_VX;
defm PseudoVMULH   : VPseudoVMUL_VV_VX;
defm PseudoVMULHU  : VPseudoVMUL_VV_VX;
defm PseudoVMULHSU : VPseudoVMUL_VV_VX;

//===----------------------------------------------------------------------===//
// 11.11. Vector Integer Divide Instructions
//===----------------------------------------------------------------------===//
defm PseudoVDIVU : VPseudoVDIV_VV_VX;
defm PseudoVDIV  : VPseudoVDIV_VV_VX;
defm PseudoVREMU : VPseudoVDIV_VV_VX;
defm PseudoVREM  : VPseudoVDIV_VV_VX;

//===----------------------------------------------------------------------===//
// 11.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm PseudoVWMUL   : VPseudoVWMUL_VV_VX;
defm PseudoVWMULU  : VPseudoVWMUL_VV_VX;
defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;

//===----------------------------------------------------------------------===//
// 11.13. Vector Single-Width Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMACC  : VPseudoVMAC_VV_VX_AAXA;
defm PseudoVNMSAC : VPseudoVMAC_VV_VX_AAXA;
defm PseudoVMADD  : VPseudoVMAC_VV_VX_AAXA;
defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;

//===----------------------------------------------------------------------===//
// 11.14. Vector Widening Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm PseudoVWMACCU  : VPseudoVWMAC_VV_VX;
defm PseudoVWMACC   : VPseudoVWMAC_VV_VX;
defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
// vwmaccus only has a .vx form in the ISA, hence the VX-only multiclass.
defm PseudoVWMACCUS : VPseudoVWMAC_VX;

//===----------------------------------------------------------------------===//
// 11.15. Vector Integer Merge Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMERGE : VPseudoVMRG_VM_XM_IM;

//===----------------------------------------------------------------------===//
// 11.16. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;

//===----------------------------------------------------------------------===//
// 12. Vector Fixed-Point Arithmetic Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 12.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
// Saturating ops implicitly write the vxsat CSR; modeling it with
// Defs = [VXSAT] and hasSideEffects = 1 keeps that update from being
// deleted or reordered.
let Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI;
  defm PseudoVSADD  : VPseudoVSALU_VV_VX_VI;
  defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
  defm PseudoVSSUB  : VPseudoVSALU_VV_VX;
}

//===----------------------------------------------------------------------===//
// 12.2. Vector Single-Width Averaging Add and Subtract
//===----------------------------------------------------------------------===//
// The _RM multiclasses add an explicit fixed-point rounding-mode (vxrm)
// operand to the pseudo.
defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM;
defm PseudoVAADD  : VPseudoVAALU_VV_VX_RM;
defm PseudoVASUBU : VPseudoVAALU_VV_VX_RM;
defm PseudoVASUB  : VPseudoVAALU_VV_VX_RM;

//===----------------------------------------------------------------------===//
// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
let Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVSMUL : VPseudoVSMUL_VV_VX_RM;
}

//===----------------------------------------------------------------------===//
// 12.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;

//===----------------------------------------------------------------------===//
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
let Defs = [VXSAT], hasSideEffects = 1 in {
  defm PseudoVNCLIP  : VPseudoVNCLP_WV_WX_WI_RM;
  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI_RM;
}

} // Predicates = [HasVInstructions]
6533
//===----------------------------------------------------------------------===//
// 13. Vector Floating-Point Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
// _RM pseudos carry an FP rounding-mode operand; hasPostISelHook enables the
// target's post-instruction-selection hook for these instructions
// (presumably for FRM handling -- confirm in RISCVISelLowering).
// NOTE(review): unlike the sibling FP sections below, this let does not also
// clear hasSideEffects -- confirm that is intended.
let mayRaiseFPException = true, hasPostISelHook = 1 in {
defm PseudoVFADD  : VPseudoVALU_VV_VF_RM;
defm PseudoVFSUB  : VPseudoVALU_VV_VF_RM;
defm PseudoVFRSUB : VPseudoVALU_VF_RM;
}

//===----------------------------------------------------------------------===//
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFWADD : VPseudoVFWALU_VV_VF_RM;
defm PseudoVFWSUB : VPseudoVFWALU_VV_VF_RM;
defm PseudoVFWADD : VPseudoVFWALU_WV_WF_RM;
defm PseudoVFWSUB : VPseudoVFWALU_WV_WF_RM;
}

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFMUL  : VPseudoVFMUL_VV_VF_RM;
defm PseudoVFDIV  : VPseudoVFDIV_VV_VF_RM;
defm PseudoVFRDIV : VPseudoVFRDIV_VF_RM;
}

//===----------------------------------------------------------------------===//
// 13.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0 in {
defm PseudoVFWMUL : VPseudoVWMUL_VV_VF_RM;
}

//===----------------------------------------------------------------------===//
// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFMACC  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMACC : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFMSAC  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMSAC : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFMADD  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMADD : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFMSUB  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMSUB : VPseudoVMAC_VV_VF_AAXA_RM;
}

//===----------------------------------------------------------------------===//
// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFWMACC  : VPseudoVWMAC_VV_VF_RM;
defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF_RM;
defm PseudoVFWMSAC  : VPseudoVWMAC_VV_VF_RM;
defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF_RM;
}

//===----------------------------------------------------------------------===//
// 13.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0 in
defm PseudoVFSQRT : VPseudoVSQR_V_RM;

//===----------------------------------------------------------------------===//
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
// Estimate instruction (7-bit accuracy); no rounding-mode operand.
let mayRaiseFPException = true in
defm PseudoVFRSQRT7 : VPseudoVRCP_V;

//===----------------------------------------------------------------------===//
// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0 in
defm PseudoVFREC7 : VPseudoVRCP_V_RM;

//===----------------------------------------------------------------------===//
// 13.11. Vector Floating-Point Min/Max Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
defm PseudoVFMIN : VPseudoVMAX_VV_VF;
defm PseudoVFMAX : VPseudoVMAX_VV_VF;
}

//===----------------------------------------------------------------------===//
// 13.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
// Sign-injection only manipulates sign bits, so no mayRaiseFPException here.
defm PseudoVFSGNJ  : VPseudoVSGNJ_VV_VF;
defm PseudoVFSGNJN : VPseudoVSGNJ_VV_VF;
defm PseudoVFSGNJX : VPseudoVSGNJ_VV_VF;

//===----------------------------------------------------------------------===//
// 13.13. Vector Floating-Point Compare Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
defm PseudoVMFEQ : VPseudoVCMPM_VV_VF;
defm PseudoVMFNE : VPseudoVCMPM_VV_VF;
defm PseudoVMFLT : VPseudoVCMPM_VV_VF;
defm PseudoVMFLE : VPseudoVCMPM_VV_VF;
defm PseudoVMFGT : VPseudoVCMPM_VF;
defm PseudoVMFGE : VPseudoVCMPM_VF;
}

//===----------------------------------------------------------------------===//
// 13.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFCLASS : VPseudoVCLS_V;

//===----------------------------------------------------------------------===//
// 13.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMERGE : VPseudoVMRG_FM;

//===----------------------------------------------------------------------===//
// 13.16. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMV_V : VPseudoVMV_F;

//===----------------------------------------------------------------------===//
// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
// Three conversion flavors: _RM (dynamic rounding-mode operand), RM_ (static
// rounding mode baked into the pseudo), and RTZ (round-towards-zero encoded
// in the instruction itself).
let mayRaiseFPException = true in {
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFCVT_XU_F : VPseudoVCVTI_V_RM;
defm PseudoVFCVT_X_F : VPseudoVCVTI_V_RM;
}

defm PseudoVFCVT_RM_XU_F : VPseudoVCVTI_RM_V;
defm PseudoVFCVT_RM_X_F : VPseudoVCVTI_RM_V;

defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;

defm PseudoVFROUND_NOEXCEPT : VPseudoVFROUND_NOEXCEPT_V;
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFCVT_F_XU : VPseudoVCVTF_V_RM;
defm PseudoVFCVT_F_X : VPseudoVCVTF_V_RM;
}
defm PseudoVFCVT_RM_F_XU : VPseudoVCVTF_RM_V;
defm PseudoVFCVT_RM_F_X  : VPseudoVCVTF_RM_V;
} // mayRaiseFPException = true

//===----------------------------------------------------------------------===//
// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFWCVT_XU_F     : VPseudoVWCVTI_V_RM;
defm PseudoVFWCVT_X_F      : VPseudoVWCVTI_V_RM;
}
defm PseudoVFWCVT_RM_XU_F  : VPseudoVWCVTI_RM_V;
defm PseudoVFWCVT_RM_X_F   : VPseudoVWCVTI_RM_V;

defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
defm PseudoVFWCVT_RTZ_X_F  : VPseudoVWCVTI_V;

defm PseudoVFWCVT_F_XU     : VPseudoVWCVTF_V;
defm PseudoVFWCVT_F_X      : VPseudoVWCVTF_V;

defm PseudoVFWCVT_F_F      : VPseudoVWCVTD_V;
} // mayRaiseFPException = true

//===----------------------------------------------------------------------===//
// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFNCVT_XU_F     : VPseudoVNCVTI_W_RM;
defm PseudoVFNCVT_X_F      : VPseudoVNCVTI_W_RM;
}
defm PseudoVFNCVT_RM_XU_F  : VPseudoVNCVTI_RM_W;
defm PseudoVFNCVT_RM_X_F   : VPseudoVNCVTI_RM_W;

defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
defm PseudoVFNCVT_RTZ_X_F  : VPseudoVNCVTI_W;

let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFNCVT_F_XU     : VPseudoVNCVTF_W_RM;
defm PseudoVFNCVT_F_X      : VPseudoVNCVTF_W_RM;
}
defm PseudoVFNCVT_RM_F_XU  : VPseudoVNCVTF_RM_W;
defm PseudoVFNCVT_RM_F_X   : VPseudoVNCVTF_RM_W;

let hasSideEffects = 0, hasPostISelHook = 1 in
defm PseudoVFNCVT_F_F      : VPseudoVNCVTD_W_RM;

// ROD = round-towards-odd, encoded in the instruction (no rm operand).
defm PseudoVFNCVT_ROD_F_F  : VPseudoVNCVTD_W;
} // mayRaiseFPException = true
} // Predicates = [HasVInstructionsAnyF]
6729
//===----------------------------------------------------------------------===//
// 14. Vector Reduction Operations
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 14.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm PseudoVREDSUM  : VPseudoVRED_VS;
defm PseudoVREDAND  : VPseudoVRED_VS;
defm PseudoVREDOR   : VPseudoVRED_VS;
defm PseudoVREDXOR  : VPseudoVRED_VS;
defm PseudoVREDMINU : VPseudoVREDMINMAX_VS;
defm PseudoVREDMIN  : VPseudoVREDMINMAX_VS;
defm PseudoVREDMAXU : VPseudoVREDMINMAX_VS;
defm PseudoVREDMAX  : VPseudoVREDMINMAX_VS;

//===----------------------------------------------------------------------===//
// 14.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// IsRVVWideningReduction marks these so later passes can recognize widening
// reductions (scalar accumulator is 2*SEW).
let IsRVVWideningReduction = 1 in {
defm PseudoVWREDSUMU   : VPseudoVWRED_VS;
defm PseudoVWREDSUM    : VPseudoVWRED_VS;
}
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 14.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
// OSUM is the ordered (sequential) sum; USUM allows unordered association.
let mayRaiseFPException = true,
    hasSideEffects = 0 in {
defm PseudoVFREDOSUM : VPseudoVFREDO_VS_RM;
defm PseudoVFREDUSUM : VPseudoVFRED_VS_RM;
}
let mayRaiseFPException = true in {
defm PseudoVFREDMIN  : VPseudoVFREDMINMAX_VS;
defm PseudoVFREDMAX  : VPseudoVFREDMINMAX_VS;
}

//===----------------------------------------------------------------------===//
// 14.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
let IsRVVWideningReduction = 1,
    hasSideEffects = 0,
    mayRaiseFPException = true in {
defm PseudoVFWREDUSUM  : VPseudoVFWRED_VS_RM;
defm PseudoVFWREDOSUM  : VPseudoVFWRED_VS_RM;
}

} // Predicates = [HasVInstructionsAnyF]
6781
//===----------------------------------------------------------------------===//
// 15. Vector Mask Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 15.1 Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//

defm PseudoVMAND: VPseudoVALU_MM;
defm PseudoVMNAND: VPseudoVALU_MM;
defm PseudoVMANDN: VPseudoVALU_MM;
defm PseudoVMXOR: VPseudoVALU_MM;
defm PseudoVMOR: VPseudoVALU_MM;
defm PseudoVMNOR: VPseudoVALU_MM;
defm PseudoVMORN: VPseudoVALU_MM;
defm PseudoVMXNOR: VPseudoVALU_MM;

// Pseudo instructions
// vmclr.m/vmset.m are expansions of a mask-logical op with all operands
// equal: vmclr -> vmxor (x^x == 0), vmset -> vmxnor (~(x^x) == all ones).
defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;

//===----------------------------------------------------------------------===//
// 15.2. Vector mask population count vcpop
//===----------------------------------------------------------------------===//

defm PseudoVCPOP: VPseudoVPOP_M;

//===----------------------------------------------------------------------===//
// 15.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//

defm PseudoVFIRST: VPseudoV1ST_M;

//===----------------------------------------------------------------------===//
// 15.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSBF: VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 15.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSIF: VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 15.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSOF: VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 15.8.  Vector Iota Instruction
//===----------------------------------------------------------------------===//
defm PseudoVIOTA_M: VPseudoVIOT_M;

//===----------------------------------------------------------------------===//
// 15.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
defm PseudoVID : VPseudoVID_V;
6839
//===----------------------------------------------------------------------===//
// 16. Vector Permutation Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// 16.1. Integer Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  // One pseudo per LMUL. VMV_X_S (vector -> scalar) needs only a SEW
  // operand; VMV_S_X (scalar -> vector) also takes AVL, and ties $rd to
  // $rs1 because elements past the first are taken from the passthru.
  foreach m = MxList in {
    defvar mx = m.MX;
    let VLMul = m.value in {
      let HasSEWOp = 1, BaseInstr = VMV_X_S in
      def PseudoVMV_X_S # "_" # mx:
        Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
        Sched<[WriteVIMovVX, ReadVIMovVX]>,
        RISCVVPseudo;
      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
          Constraints = "$rd = $rs1" in
      def PseudoVMV_S_X # "_" # mx: Pseudo<(outs m.vrclass:$rd),
                                             (ins m.vrclass:$rs1, GPR:$rs2,
                                                  AVL:$vl, ixlenimm:$sew),
                                             []>,
        Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>,
        RISCVVPseudo;
    }
  }
}
} // Predicates = [HasVInstructions]
6870
//===----------------------------------------------------------------------===//
// 16.2. Floating-Point Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructionsAnyF] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  // FP analogue of 16.1: one pseudo per (FP register class, LMUL) pair.
  // VFMV_S_F ties $rd to $rs1 since only the first element is written.
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      let VLMul = m.value in {
        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
        def "PseudoVFMV_" # f.FX # "_S_" # mx :
          Pseudo<(outs f.fprclass:$rd),
                 (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
          Sched<[WriteVFMovVF, ReadVFMovVF]>,
          RISCVVPseudo;
        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
            Constraints = "$rd = $rs1" in
        def "PseudoVFMV_S_" # f.FX # "_" # mx :
                                          Pseudo<(outs m.vrclass:$rd),
                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
                                                      AVL:$vl, ixlenimm:$sew),
                                                 []>,
          Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>,
          RISCVVPseudo;
      }
    }
  }
}
} // Predicates = [HasVInstructionsAnyF]
6901
//===----------------------------------------------------------------------===//
// 16.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
  // Slide-up destinations may not overlap the source, hence @earlyclobber;
  // slide-down has no such restriction.
  defm PseudoVSLIDEUP    : VPseudoVSLD_VX_VI<uimm5, "@earlyclobber $rd">;
  defm PseudoVSLIDEDOWN  : VPseudoVSLD_VX_VI<uimm5>;
  defm PseudoVSLIDE1UP   : VPseudoVSLD1_VX<"@earlyclobber $rd">;
  defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
  defm PseudoVFSLIDE1UP  : VPseudoVSLD1_VF<"@earlyclobber $rd">;
  defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 16.4. Vector Register Gather Instructions
//===----------------------------------------------------------------------===//
// Gather destinations may not overlap the source or index register groups.
defm PseudoVRGATHER     : VPseudoVGTR_VV_VX_VI<uimm5, "@earlyclobber $rd">;
// vrgatherei16 always uses EEW=16 indices regardless of SEW.
defm PseudoVRGATHEREI16 : VPseudoVGTR_VV_EEW<eew=16,
                                             Constraint="@earlyclobber $rd">;

//===----------------------------------------------------------------------===//
// 16.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
defm PseudoVCOMPRESS : VPseudoVCPR_V;
6928
6929//===----------------------------------------------------------------------===//
6930// Patterns.
6931//===----------------------------------------------------------------------===//
6932
6933//===----------------------------------------------------------------------===//
6934// 11. Vector Integer Arithmetic Instructions
6935//===----------------------------------------------------------------------===//
6936
6937//===----------------------------------------------------------------------===//
6938// 11.1. Vector Single-Width Integer Add and Subtract
6939//===----------------------------------------------------------------------===//
6940defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
6941defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
6942defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
6943
6944//===----------------------------------------------------------------------===//
6945// 11.2. Vector Widening Integer Add/Subtract
6946//===----------------------------------------------------------------------===//
6947defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
6948defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
6949defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
6950defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
6951defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
6952defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
6953defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
6954defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;
6955
6956//===----------------------------------------------------------------------===//
6957// 11.3. Vector Integer Extension
6958//===----------------------------------------------------------------------===//
6959defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
6960                     AllFractionableVF2IntVectors>;
6961defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
6962                     AllFractionableVF4IntVectors>;
6963defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
6964                     AllFractionableVF8IntVectors>;
6965defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
6966                     AllFractionableVF2IntVectors>;
6967defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
6968                     AllFractionableVF4IntVectors>;
6969defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
6970                     AllFractionableVF8IntVectors>;
6971
6972//===----------------------------------------------------------------------===//
6973// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
6974//===----------------------------------------------------------------------===//
6975defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
6976defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
6977defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;
6978
6979defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
6980defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
6981defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
6982
6983//===----------------------------------------------------------------------===//
6984// 11.5. Vector Bitwise Logical Instructions
6985//===----------------------------------------------------------------------===//
6986defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
6987defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
6988defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;
6989
6990//===----------------------------------------------------------------------===//
6991// 11.6. Vector Single-Width Bit Shift Instructions
6992//===----------------------------------------------------------------------===//
6993defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
6994                            uimm5>;
6995defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
6996                            uimm5>;
6997defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
6998                            uimm5>;
6999
foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // Unmasked case: vsll with an undef passthru and an immediate shift
    // amount of 1 becomes vadd.vv rs1, rs1 with an IMPLICIT_DEF merge.
    def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef),
                                          (vti.Vector vti.RegClass:$rs1),
                                          (XLenVT 1), VLOpFrag)),
              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                 vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
    // Masked case: same rewrite, carrying through the merge operand, the
    // mask in V0, and the caller-supplied policy.
    def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
                                               (vti.Vector vti.RegClass:$rs1),
                                               (XLenVT 1),
                                               (vti.Mask V0),
                                               VLOpFrag,
                                               (XLenVT timm:$policy))),
              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
                                                          vti.RegClass:$merge,
                                                          vti.RegClass:$rs1,
                                                          vti.RegClass:$rs1,
                                                          (vti.Mask V0),
                                                          GPR:$vl,
                                                          vti.Log2SEW,
                                                          (XLenVT timm:$policy))>;
  }
}
7025
7026//===----------------------------------------------------------------------===//
7027// 11.7. Vector Narrowing Integer Right Shift Instructions
7028//===----------------------------------------------------------------------===//
7029defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
7030defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
7031
7032//===----------------------------------------------------------------------===//
7033// 11.8. Vector Integer Comparison Instructions
7034//===----------------------------------------------------------------------===//
7035defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
7036defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
7037defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
7038defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
7039defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
7040defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;
7041
7042defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
7043defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
7044
7045// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
7046defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
7047defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
7048
7049defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
7050defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
7051
// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16 and
// non-zero. Zero can be .vx with x0. This avoids the user needing to know that
// there is no vmslt(u).vi instruction. Similar for vmsge(u).vx intrinsics
// using vmsgt(u).vi.
defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE", simm5_plus1_nonzero>;
defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;

// We need to handle 0 for vmsge.vi using vmsgt.vi because there is no vmsge.vx.
defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT", simm5_plus1>;
defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
7062
7063//===----------------------------------------------------------------------===//
7064// 11.9. Vector Integer Min/Max Instructions
7065//===----------------------------------------------------------------------===//
7066defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
7067defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
7068defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
7069defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
7070
7071//===----------------------------------------------------------------------===//
7072// 11.10. Vector Single-Width Integer Multiply Instructions
7073//===----------------------------------------------------------------------===//
7074defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
7075
7076defvar IntegerVectorsExceptI64 = !filter(vti, AllIntegerVectors,
7077                                         !ne(vti.SEW, 64));
7078defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
7079                         IntegerVectorsExceptI64>;
7080defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
7081                         IntegerVectorsExceptI64>;
7082defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
7083                         IntegerVectorsExceptI64>;
7084
7085// vmulh, vmulhu, vmulhsu are not included for EEW=64 in Zve64*.
7086defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
7087let Predicates = [HasVInstructionsFullMultiply] in {
7088  defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
7089                           I64IntegerVectors>;
7090  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
7091                           I64IntegerVectors>;
7092  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
7093                           I64IntegerVectors>;
7094}
7095
7096//===----------------------------------------------------------------------===//
7097// 11.11. Vector Integer Divide Instructions
7098//===----------------------------------------------------------------------===//
7099defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors, isSEWAware=1>;
7100defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors, isSEWAware=1>;
7101defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors, isSEWAware=1>;
7102defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors, isSEWAware=1>;
7103
7104//===----------------------------------------------------------------------===//
7105// 11.12. Vector Widening Integer Multiply Instructions
7106//===----------------------------------------------------------------------===//
7107defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
7108defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
7109defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
7110
7111//===----------------------------------------------------------------------===//
7112// 11.13. Vector Single-Width Integer Multiply-Add Instructions
7113//===----------------------------------------------------------------------===//
7114defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
7115defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
7116defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
7117defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
7118
7119//===----------------------------------------------------------------------===//
7120// 11.14. Vector Widening Integer Multiply-Add Instructions
7121//===----------------------------------------------------------------------===//
7122defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
7123defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
7124defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
7125defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
7126
7127//===----------------------------------------------------------------------===//
7128// 11.15. Vector Integer Merge Instructions
7129//===----------------------------------------------------------------------===//
7130defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
7131
7132//===----------------------------------------------------------------------===//
7133// 11.16. Vector Integer Move Instructions
7134//===----------------------------------------------------------------------===//
7135foreach vti = AllVectors in {
7136  let Predicates = GetVTypePredicates<vti>.Predicates in {
7137    def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
7138                                             (vti.Vector vti.RegClass:$rs1),
7139                                             VLOpFrag)),
7140              (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
7141               $passthru, $rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
7142
7143    // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
7144  }
7145}
7146
7147//===----------------------------------------------------------------------===//
7148// 12. Vector Fixed-Point Arithmetic Instructions
7149//===----------------------------------------------------------------------===//
7150
7151//===----------------------------------------------------------------------===//
7152// 12.1. Vector Single-Width Saturating Add and Subtract
7153//===----------------------------------------------------------------------===//
7154defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
7155defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
7156defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
7157defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
7158
7159//===----------------------------------------------------------------------===//
7160// 12.2. Vector Single-Width Averaging Add and Subtract
7161//===----------------------------------------------------------------------===//
7162defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaaddu", "PseudoVAADDU",
7163                            AllIntegerVectors>;
7164defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasubu", "PseudoVASUBU",
7165                            AllIntegerVectors>;
7166defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasub", "PseudoVASUB",
7167                            AllIntegerVectors>;
7168defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaadd", "PseudoVAADD",
7169                            AllIntegerVectors>;
7170
7171//===----------------------------------------------------------------------===//
7172// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
7173//===----------------------------------------------------------------------===//
7174defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7175                             IntegerVectorsExceptI64>;
7176// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
7177let Predicates = [HasVInstructionsFullMultiply] in
7178defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7179                             I64IntegerVectors>;
7180
7181//===----------------------------------------------------------------------===//
7182// 12.4. Vector Single-Width Scaling Shift Instructions
7183//===----------------------------------------------------------------------===//
7184defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssrl", "PseudoVSSRL",
7185                               AllIntegerVectors, uimm5>;
7186defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssra", "PseudoVSSRA",
7187                               AllIntegerVectors, uimm5>;
7188
7189//===----------------------------------------------------------------------===//
7190// 12.5. Vector Narrowing Fixed-Point Clip Instructions
7191//===----------------------------------------------------------------------===//
7192defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclipu", "PseudoVNCLIPU",
7193                               AllWidenableIntVectors>;
7194defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP",
7195                               AllWidenableIntVectors>;
7196
7197//===----------------------------------------------------------------------===//
7198// 13. Vector Floating-Point Instructions
7199//===----------------------------------------------------------------------===//
7200
7201//===----------------------------------------------------------------------===//
7202// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
7203//===----------------------------------------------------------------------===//
7204defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD",
7205                            AllFloatVectors>;
7206defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB",
7207                            AllFloatVectors>;
7208defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
7209
7210//===----------------------------------------------------------------------===//
7211// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
7212//===----------------------------------------------------------------------===//
7213defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD",
7214                            AllWidenableFloatVectors>;
7215defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB",
7216                            AllWidenableFloatVectors>;
7217defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD",
7218                            AllWidenableFloatVectors>;
7219defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB",
7220                            AllWidenableFloatVectors>;
7221
7222//===----------------------------------------------------------------------===//
7223// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
7224//===----------------------------------------------------------------------===//
7225defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL",
7226                            AllFloatVectors>;
7227defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfdiv", "PseudoVFDIV",
7228                            AllFloatVectors, isSEWAware=1>;
7229defm : VPatBinaryV_VX_RM<"int_riscv_vfrdiv", "PseudoVFRDIV",
7230                         AllFloatVectors, isSEWAware=1>;
7231
7232//===----------------------------------------------------------------------===//
7233// 13.5. Vector Widening Floating-Point Multiply
7234//===----------------------------------------------------------------------===//
7235defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL",
7236                            AllWidenableFloatVectors>;
7237
7238//===----------------------------------------------------------------------===//
7239// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
7240//===----------------------------------------------------------------------===//
7241defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
7242defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
7243defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
7244defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
7245defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
7246defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
7247defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
7248defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;
7249
7250//===----------------------------------------------------------------------===//
7251// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
7252//===----------------------------------------------------------------------===//
7253defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC",
7254                             AllWidenableFloatVectors>;
7255defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC",
7256                             AllWidenableFloatVectors>;
7257defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC",
7258                             AllWidenableFloatVectors>;
7259defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC",
7260                             AllWidenableFloatVectors>;
7261
7262//===----------------------------------------------------------------------===//
7263// 13.8. Vector Floating-Point Square-Root Instruction
7264//===----------------------------------------------------------------------===//
7265defm : VPatUnaryV_V_RM<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors, isSEWAware=1>;
7266
7267//===----------------------------------------------------------------------===//
7268// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
7269//===----------------------------------------------------------------------===//
7270defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;
7271
7272//===----------------------------------------------------------------------===//
7273// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
7274//===----------------------------------------------------------------------===//
7275defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;
7276
7277//===----------------------------------------------------------------------===//
7278// 13.11. Vector Floating-Point Min/Max Instructions
7279//===----------------------------------------------------------------------===//
7280defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
7281defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;
7282
7283//===----------------------------------------------------------------------===//
7284// 13.12. Vector Floating-Point Sign-Injection Instructions
7285//===----------------------------------------------------------------------===//
7286defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
7287defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
7288defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;
7289
7290//===----------------------------------------------------------------------===//
7291// 13.13. Vector Floating-Point Compare Instructions
7292//===----------------------------------------------------------------------===//
7293defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
7294defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
7295defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
7296defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
7297defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
7298defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
7299defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
7300defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;
7301
7302//===----------------------------------------------------------------------===//
7303// 13.14. Vector Floating-Point Classify Instruction
7304//===----------------------------------------------------------------------===//
7305defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
7306
7307//===----------------------------------------------------------------------===//
7308// 13.15. Vector Floating-Point Merge Instruction
7309//===----------------------------------------------------------------------===//
7310// We can use vmerge.vvm to support vector-vector vfmerge.
7311// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
7312// int_riscv_vmerge. Support both for compatibility.
7313foreach vti = AllFloatVectors in {
7314  let Predicates = GetVTypePredicates<vti>.Predicates in {
7315    defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
7316                                 vti.Vector,
7317                                 vti.Vector, vti.Vector, vti.Mask,
7318                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7319                                 vti.RegClass, vti.RegClass>;
7320    defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVMERGE", "VVM",
7321                                 vti.Vector,
7322                                 vti.Vector, vti.Vector, vti.Mask,
7323                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7324                                 vti.RegClass, vti.RegClass>;
7325    defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
7326                                 "V"#vti.ScalarSuffix#"M",
7327                                 vti.Vector,
7328                                 vti.Vector, vti.Scalar, vti.Mask,
7329                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7330                                 vti.RegClass, vti.ScalarRegClass>;
7331  }
7332}
7333
7334foreach fvti = AllFloatVectors in {
7335  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
7336  let Predicates = GetVTypePredicates<fvti>.Predicates in
7337  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
7338                                            (fvti.Vector fvti.RegClass:$rs2),
7339                                            (fvti.Scalar (fpimm0)),
7340                                            (fvti.Mask V0), VLOpFrag)),
7341            (instr fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
7342                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
7343}
7344
7345//===----------------------------------------------------------------------===//
7346// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
7347//===----------------------------------------------------------------------===//
7348defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
7349defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
7350defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
7351defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
7352defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">;
7353defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">;
7354
7355//===----------------------------------------------------------------------===//
7356// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
7357//===----------------------------------------------------------------------===//
7358defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
7359defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
7360defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
7361defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
7362defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
7363defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
7364defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;
7365
7366//===----------------------------------------------------------------------===//
7367// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
7368//===----------------------------------------------------------------------===//
7369defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
7370defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
7371defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
7372defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
7373defm : VPatConversionVF_WI_RM <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
7374defm : VPatConversionVF_WI_RM <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
7375defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
7376defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
7377
7378//===----------------------------------------------------------------------===//
7379// 14. Vector Reduction Operations
7380//===----------------------------------------------------------------------===//
7381
7382//===----------------------------------------------------------------------===//
7383// 14.1. Vector Single-Width Integer Reduction Instructions
7384//===----------------------------------------------------------------------===//
7385defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
7386defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
7387defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
7388defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
7389defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
7390defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
7391defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
7392defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;
7393
7394//===----------------------------------------------------------------------===//
7395// 14.2. Vector Widening Integer Reduction Instructions
7396//===----------------------------------------------------------------------===//
7397defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
7398defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
7399
7400//===----------------------------------------------------------------------===//
7401// 14.3. Vector Single-Width Floating-Point Reduction Instructions
7402//===----------------------------------------------------------------------===//
7403defm : VPatReductionV_VS_RM<"int_riscv_vfredosum", "PseudoVFREDOSUM", IsFloat=1>;
7404defm : VPatReductionV_VS_RM<"int_riscv_vfredusum", "PseudoVFREDUSUM", IsFloat=1>;
7405defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", IsFloat=1>;
7406defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", IsFloat=1>;
7407
7408//===----------------------------------------------------------------------===//
7409// 14.4. Vector Widening Floating-Point Reduction Instructions
7410//===----------------------------------------------------------------------===//
7411defm : VPatReductionW_VS_RM<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", IsFloat=1>;
7412defm : VPatReductionW_VS_RM<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", IsFloat=1>;
7413
7414//===----------------------------------------------------------------------===//
7415// 15. Vector Mask Instructions
7416//===----------------------------------------------------------------------===//
7417
7418//===----------------------------------------------------------------------===//
7419// 15.1 Vector Mask-Register Logical Instructions
7420//===----------------------------------------------------------------------===//
7421defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
7422defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
7423defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
7424defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
7425defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
7426defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
7427defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
7428defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
7429
7430// pseudo instructions
7431defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
7432defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
7433
7434//===----------------------------------------------------------------------===//
7435// 15.2. Vector count population in mask vcpop.m
7436//===----------------------------------------------------------------------===//
7437defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;
7438
7439//===----------------------------------------------------------------------===//
7440// 15.3. vfirst find-first-set mask bit
7441//===----------------------------------------------------------------------===//
7442defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;
7443
7444//===----------------------------------------------------------------------===//
7445// 15.4. vmsbf.m set-before-first mask bit
7446//===----------------------------------------------------------------------===//
7447defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;
7448
7449//===----------------------------------------------------------------------===//
7450// 15.5. vmsif.m set-including-first mask bit
7451//===----------------------------------------------------------------------===//
7452defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;
7453
7454//===----------------------------------------------------------------------===//
7455// 15.6. vmsof.m set-only-first mask bit
7456//===----------------------------------------------------------------------===//
7457defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;
7458
7459//===----------------------------------------------------------------------===//
7460// 15.8.  Vector Iota Instruction
7461//===----------------------------------------------------------------------===//
7462defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;
7463
7464//===----------------------------------------------------------------------===//
7465// 15.9. Vector Element Index Instruction
7466//===----------------------------------------------------------------------===//
7467defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
7468
7469
7470//===----------------------------------------------------------------------===//
7471// 16. Vector Permutation Instructions
7472//===----------------------------------------------------------------------===//
7473
7474//===----------------------------------------------------------------------===//
7475// 16.1. Integer Scalar Move Instructions
7476//===----------------------------------------------------------------------===//
7477
7478foreach vti = AllIntegerVectors in {
7479  let Predicates = GetVTypePredicates<vti>.Predicates in
7480  def : Pat<(XLenVT (riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2))),
7481            (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
7482  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
7483}
7484
7485//===----------------------------------------------------------------------===//
7486// 16.2. Floating-Point Scalar Move Instructions
7487//===----------------------------------------------------------------------===//
7488
7489foreach fvti = AllFloatVectors in {
7490  let Predicates = GetVTypePredicates<fvti>.Predicates in {
7491    def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
7492                           (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
7493              (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
7494                                  fvti.LMul.MX)
7495               (fvti.Vector $rs1),
7496               (fvti.Scalar fvti.ScalarRegClass:$rs2),
7497               GPR:$vl, fvti.Log2SEW)>;
7498
7499    def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
7500                           (fvti.Scalar (fpimm0)), VLOpFrag)),
7501              (!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
7502               (fvti.Vector $rs1), (XLenVT X0), GPR:$vl, fvti.Log2SEW)>;
7503  }
7504}
7505
7506//===----------------------------------------------------------------------===//
7507// 16.3. Vector Slide Instructions
7508//===----------------------------------------------------------------------===//
7509defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
7510defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
7511defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
7512defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
7513
7514defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
7515defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
7516defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
7517defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
7518
7519//===----------------------------------------------------------------------===//
7520// 16.4. Vector Register Gather Instructions
7521//===----------------------------------------------------------------------===//
7522defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7523                                AllIntegerVectors, uimm5>;
7524defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7525                              eew=16, vtilist=AllIntegerVectors>;
7526
7527defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7528                                AllFloatVectors, uimm5>;
7529defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7530                              eew=16, vtilist=AllFloatVectors>;
7531//===----------------------------------------------------------------------===//
7532// 16.5. Vector Compress Instruction
7533//===----------------------------------------------------------------------===//
7534defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
7535defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
7536defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
7537defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
7538defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
7539defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
7540
// Include the non-intrinsic ISel patterns (VL-node patterns first, then the
// generic SelectionDAG patterns).
include "RISCVInstrInfoVVLPatterns.td"
include "RISCVInstrInfoVSDPatterns.td"
7544