xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (revision dab59af3bcc7cb7ba01569d3044894b3e860ad56)
1//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file contains the required infrastructure to support code generation
10/// for the standard 'V' (Vector) extension, version 1.0.
11///
12/// This file is included from RISCVInstrInfoV.td
13///
14/// Overview of our vector instruction pseudos.  Many of the instructions
15/// have behavior which depends on the value of VTYPE.  Several core aspects of
16/// the compiler - e.g. register allocation - depend on fields in this
17/// configuration register.  The details of which fields matter differ by the
18/// specific instruction, but the common dimensions are:
19///
20/// LMUL/EMUL - Most instructions can write to differently sized register groups
21/// depending on LMUL.
22///
23/// Masked vs Unmasked - Many instructions which allow a mask disallow register
24/// overlap.  As a result, masked vs unmasked require different register
25/// allocation constraints.
26///
27/// Policy - For each of mask and tail policy, there are three options:
28/// * "Undisturbed" - As defined in the specification, required to preserve the
29/// exact bit pattern of inactive lanes.
30/// * "Agnostic" - As defined in the specification, required to either preserve
31/// the exact bit pattern of inactive lanes, or produce the bit pattern -1 for
32/// those lanes.  Note that each lane can make this choice independently.
33/// Instructions which produce masks (and only those instructions) also have the
34/// option of producing a result as-if VL had been VLMAX.
35/// * "Undefined" - The bit pattern of the inactive lanes is unspecified, and
36/// can be changed without impacting the semantics of the program.  Note that
37/// this concept does not exist in the specification, and requires source
38/// knowledge to be preserved.
39///
40/// SEW - Some instructions have semantics which depend on SEW.  This is
41/// relatively rare, and mostly impacts scheduling and cost estimation.
42///
43/// We have two techniques we use to represent the impact of these fields:
44/// * For fields which don't impact register classes, we largely use
45/// dummy operands on the pseudo instructions which convey information
46/// about the value of VTYPE.
47/// * For fields which do impact register classes (and a few bits of
48/// legacy - see policy discussion below), we define a family of pseudo
49/// instructions for each actual instruction.  Said differently, we encode
50/// each of the preceding fields which are relevant for a given instruction
51/// in the opcode space.
52///
/// Currently, the policy is represented via the following intrinsic families:
54/// * _MASK - Can represent all three policy states for both tail and mask.  If
55///   passthrough is IMPLICIT_DEF (or NoReg), then represents "undefined".
56///   Otherwise, policy operand and tablegen flags drive the interpretation.
///   (If policy operand is not present - there are a couple, though we're
///   rapidly removing them - a non-undefined policy defaults to "tail
///   agnostic", and "mask undisturbed".)  Since this is the only variant with
///   a mask, all other variants are "mask undefined".
61/// * Unsuffixed w/ both passthrough and policy operand. Can represent all
62///   three policy states.  If passthrough is IMPLICIT_DEF (or NoReg), then
63///   represents "undefined".  Otherwise, policy operand and tablegen flags
64///   drive the interpretation.
65/// * Unsuffixed w/o passthrough or policy operand -- Does not have a
66///   passthrough operand, and thus represents the "undefined" state.  Note
67///   that terminology in code frequently refers to these as "TA" which is
68///   confusing.  We're in the process of migrating away from this
69///   representation.
70///
71//===----------------------------------------------------------------------===//
72
// SDNode for vmv.x.s: moves an element of an integer vector into a scalar
// (integer result, one vector-of-int operand).
def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
// SDNode that reads the VLENB CSR; result is XLenVT, no operands.
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;
78
// Operand that is allowed to be a register other than X0, a 5 bit unsigned
// immediate, or -1. -1 means VLMAX. This allows us to pick between VSETIVLI and
// VSETVLI opcodes using the same pseudo instructions.
def AVL : RegisterOperand<GPRNoX0> {
  let OperandNamespace = "RISCVOp";
  let OperandType = "OPERAND_AVL";
}

// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
// Complex pattern (selectVLOp in ISel) that matches the AVL operand of a
// vector pseudo.
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;
93
// Transform an immediate into the same immediate minus one (e.g. to turn an
// element count into the index of the last element).
def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Vector policy-operand immediates.  Judging by the values used here, bit 0
// is the tail policy and bit 1 the mask policy (1 = agnostic, 0 =
// undisturbed) -- NOTE(review): confirm against the C++-side encoding.
defvar TAIL_AGNOSTIC = 1;
defvar TU_MU = 0;
defvar TA_MA = 3;
102
103//===----------------------------------------------------------------------===//
104// Utilities.
105//===----------------------------------------------------------------------===//
106
// Recovers the name of the underlying MC instruction from a pseudo's name by
// stripping the "Pseudo" prefix and all LMUL/SEW/mask/policy/etc. suffixes,
// and by folding scalar FPR register-class names into the "F" mnemonic
// letter.  The substitutions are applied in list order via !foldl below, so
// e.g. "_E64" must appear before any suffix it could be a substring of.
class PseudoToVInst<string PseudoInst> {
  defvar AffixSubsts = [["Pseudo", ""],
                        ["_E64", ""],
                        ["_E32", ""],
                        ["_E16", ""],
                        ["_E8", ""],
                        ["FPR64", "F"],
                        ["FPR32", "F"],
                        ["FPR16", "F"],
                        ["_TIED", ""],
                        ["_MASK", ""],
                        ["_B64", ""],
                        ["_B32", ""],
                        ["_B16", ""],
                        ["_B8", ""],
                        ["_B4", ""],
                        ["_B2", ""],
                        ["_B1", ""],
                        ["_MF8", ""],
                        ["_MF4", ""],
                        ["_MF2", ""],
                        ["_M1", ""],
                        ["_M2", ""],
                        ["_M4", ""],
                        ["_M8", ""],
                        ["_SE", ""],
                        ["_RM", ""]
                       ];
  // Result: the base instruction name, e.g. PseudoVADD_VV_M1_MASK -> VADD_VV.
  string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst,
                        !subst(AffixSubst[0], AffixSubst[1], Acc));
}
138
// This class describes information associated to the LMUL.
class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;   // Register class at this LMUL.
  VReg wvrclass = wregclass; // Register class at 2x this LMUL (widened).
  VReg f8vrclass = f8regclass; // Register class at 1/8 of this LMUL.
  VReg f4vrclass = f4regclass; // Register class at 1/4 of this LMUL.
  VReg f2vrclass = f2regclass; // Register class at 1/2 of this LMUL.
  string MX = mx;          // Suffix used in pseudo names, e.g. "M1".
  int octuple = oct;       // LMUL scaled by 8, so fractions are integers.
}

// Associate LMUL with tablegen records of register classes.  Where a widened
// or fractional LMUL does not exist, plain VR is used as a placeholder (the
// /*NoVReg*/ annotations below).
def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;
161
// Used to iterate over all possible LMULs.
defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
// For floating point, which doesn't need MF8 (the smallest FP element type
// defined here is 16 bits).
defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Used for widening and narrowing instructions as it doesn't contain M8.
defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
// Used for widening reductions. It can contain M8 because wider operands are
// scalar operands.
defvar MxListWRed = MxList;
// For floating point widening/narrowing, which doesn't need MF8 or M8.
defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];
// For widening floating-point Reduction as it doesn't contain MF8. It can
// contain M8 because wider operands are scalar operands.
defvar MxListFWRed = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf2 (source operand is at half LMUL, so MF8 is excluded).
defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf4 and vector crypto instructions
defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];

// Use for zext/sext.vf8
defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];
186
// The set of LMULs that are legal for a given EEW: as the element width
// doubles, the smallest legal fractional LMUL doubles as well.
class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}

// Describes a scalar floating-point type of a given SEW: its FPR register
// class, pseudo-name suffix, and the LMULs it is legal at.
class FPR_Info<int sew> {
  RegisterClass fprclass = !cast<RegisterClass>("FPR" # sew);
  string FX = "FPR" # sew;
  int SEW = sew;
  list<LMULInfo> MxList = MxSet<sew>.m;
  // LMULs usable for widening ops: none for f64 (nothing wider), and M8 is
  // removed otherwise since the widened result would exceed M8.
  list<LMULInfo> MxListFW = !if(!eq(sew, 64), [], !listremove(MxList, [V_M8]));
}
201
// Scalar FP type descriptors, keyed by SEW.
def SCALAR_F16 : FPR_Info<16>;
def SCALAR_F32 : FPR_Info<32>;
def SCALAR_F64 : FPR_Info<64>;

// BF16 uses the same register class as F16.
def SCALAR_BF16 : FPR_Info<16>;

defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];

// Used for widening instructions. It excludes F64.
defvar FPListW = [SCALAR_F16, SCALAR_F32];

// Used for widening bf16 instructions.
defvar BFPListW = [SCALAR_BF16];
216
// The list of legal NF (number of fields) values for segment instructions at
// a given LMUL, via NFList (defined elsewhere; presumably constrained by
// NF * LMUL <= 8 -- confirm against NFList's definition).
class NFSet<LMULInfo m> {
  defvar lmul = !shl(1, m.value);
  list<int> L = NFList<lmul>.L;
}

// Inverse of LMULInfo.octuple: maps an octuple (LMUL * 8) back to the MX
// name string.
class octuple_to_str<int octuple> {
  string ret = !cond(!eq(octuple, 1): "MF8",
                     !eq(octuple, 2): "MF4",
                     !eq(octuple, 4): "MF2",
                     !eq(octuple, 8): "M1",
                     !eq(octuple, 16): "M2",
                     !eq(octuple, 32): "M4",
                     !eq(octuple, 64): "M8");
}
231
// Pattern fragment matching an explicit AVL operand via the VLOp complex
// pattern.
def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;

// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
// We can't use X0 register because the AVL operands use GPRNoX0.
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;

// Complex pattern (selectFPImm in ISel) matching a floating-point immediate.
def SelectFPImm : ComplexPattern<fAny, 1, "selectFPImm", [], [], 1>;

// List of EEW.
defvar EEWList = [8, 16, 32, 64];

// Register class of an NF-field segment tuple at LMUL m.  All fractional
// LMULs map to the M1 tuple classes (VRN<nf>M1).
class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}
250
251//===----------------------------------------------------------------------===//
252// Vector register and vector group type information.
253//===----------------------------------------------------------------------===//
254
// Describes a legal vector value type: the vector and mask VTs, SEW, LMUL,
// the vector register class, and the matching scalar type / register class.
class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR> {
  ValueType Vector = Vec;
  ValueType Mask = Mas;
  int SEW = Sew;
  int Log2SEW = !logtwo(Sew);
  VReg RegClass = M.vrclass;
  LMULInfo LMul = M;
  ValueType Scalar = Scal;
  RegisterClass ScalarRegClass = ScalarReg;
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  // Name suffix used by scalar-operand pseudo variants.  Note bf16 shares
  // the FPR16 suffix (and register class) with f16.
  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "FPR16",
                              !eq(Scal, bf16) : "FPR16",
                              !eq(Scal, f32) : "FPR32",
                              !eq(Scal, f64) : "FPR64");
}

// VTypeInfo for LMUL >= 2 register groups; additionally records the vector
// type of a single (LMUL=1) register of the group.
class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, M, Scal, ScalarReg> {
  ValueType VectorM1 = VecM1;
}
282
// Instantiate a VTypeInfo for every legal (element type, LMUL) combination.
// The nested defsets build the sub-lists (integer/float/bf16, grouped/
// non-grouped/fractional) that the pseudo definitions iterate over.
defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
        def VI8MF8:  VTypeInfo<vint8mf8_t,  vbool64_t, 8,  V_MF8>;
        def VI8MF4:  VTypeInfo<vint8mf4_t,  vbool32_t, 8,  V_MF4>;
        def VI8MF2:  VTypeInfo<vint8mf2_t,  vbool16_t, 8,  V_MF2>;
        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, V_MF4>;
        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, V_MF2>;
        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, V_MF2>;
      }
      def VI8M1:  VTypeInfo<vint8m1_t,  vbool8_t,   8, V_M1>;
      def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, V_M1>;
      def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t, vint16m1_t, vbool8_t, 16, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t, vint16m1_t, vbool4_t, 16, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t, vint16m1_t, vbool2_t, 16, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t, vint32m1_t, vbool16_t, 32, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t, vint32m1_t, vbool8_t,  32, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t, vint32m1_t, vbool4_t,  32, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t, vint64m1_t, vbool32_t, 64, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t, vint64m1_t, vbool16_t, 64, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t, vint64m1_t, vbool8_t,  64, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      defset list<VTypeInfo> FractionalGroupFloatVectors = {
        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, V_MF4, f16, FPR16>;
        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, V_MF2, f16, FPR16>;
        def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, V_MF2, f32, FPR32>;
      }
      def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, V_M1, f16, FPR16>;
      def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, V_M1, f32, FPR32>;
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                 V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                 V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                 V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                 V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
                                 V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
                                 V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                 V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                 V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
                                 V_M8, f64, FPR64>;
    }
  }

  // BF16 vectors use the FPR16 scalar register class, like f16.
  defset list<VTypeInfo> AllBFloatVectors = {
    defset list<VTypeInfo> NoGroupBFloatVectors = {
      defset list<VTypeInfo> FractionalGroupBFloatVectors = {
        def VBF16MF4: VTypeInfo<vbfloat16mf4_t, vbool64_t, 16, V_MF4, bf16, FPR16>;
        def VBF16MF2: VTypeInfo<vbfloat16mf2_t, vbool32_t, 16, V_MF2, bf16, FPR16>;
      }
      def VBF16M1:  VTypeInfo<vbfloat16m1_t, vbool16_t, 16, V_M1, bf16, FPR16>;
    }

    defset list<GroupVTypeInfo> GroupBFloatVectors = {
      def VBF16M2: GroupVTypeInfo<vbfloat16m2_t, vbfloat16m1_t, vbool8_t, 16,
                                  V_M2, bf16, FPR16>;
      def VBF16M4: GroupVTypeInfo<vbfloat16m4_t, vbfloat16m1_t, vbool4_t, 16,
                                  V_M4, bf16, FPR16>;
      def VBF16M8: GroupVTypeInfo<vbfloat16m8_t, vbfloat16m1_t, vbool2_t, 16,
                                  V_M8, bf16, FPR16>;
    }
  }
}
373
// This functor is used to obtain the int vector type that has the same SEW and
// multiplier as the input parameter type.  It relies on the record-naming
// convention of the defs above (VF*/VBF* vs VI*).
class GetIntVTypeInfo<VTypeInfo vti> {
  // Equivalent integer vector type. Eg.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VBF", "VI",
                                          !subst("VF", "VI",
                                                 !cast<string>(vti))));
}
384
// Type information for a mask (vbool) vector type.
class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // we assume SEW=1 and set corresponding LMUL. vsetvli insertion will
  // look for SEW=1 to optimize based on surrounding instructions.
  int SEW = 1;
  int Log2SEW = 0;
  LMULInfo LMul = M;
  string BX = Bx; // Appendix of mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}

defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
  // NOTE(review): the LMUL/B<n> mapping below is derived assuming SEW=8,
  // while MTypeInfo records SEW=1 for vsetvli purposes; both appear
  // intentional but are easy to confuse -- confirm before changing either.
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}
410
// Relates a vector type to its widened (2*SEW) counterpart.
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti> {
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

// Relates a vector type to a narrower (fractional-SEW) counterpart.
class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti> {
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}
420
// Pairs each integer vector type with its 2*SEW widened type (LMUL doubles
// along with SEW, so M8 sources have no widened counterpart).
defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}

// Same pairing for floating-point types (f16 -> f32 -> f64).
defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}
453
// Pairs for zext/sext.vf2: each integer type with the type at half its SEW
// (and half its LMUL).
defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}

// Pairs for zext/sext.vf4: source at a quarter of the SEW/LMUL.
defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}

// Pairs for zext/sext.vf8: source at an eighth of the SEW/LMUL.
defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}
490
// Pairs each integer type with the FP type at twice the SEW (used by widening
// int-to-float conversions).
defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}

// Pairs each bf16 type with the f32 type at twice the SEW (widening
// bf16-to-float conversions).
defset list<VTypeInfoToWide> AllWidenableBFloatToFloatVectors = {
  def : VTypeInfoToWide<VBF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VBF16MF2, VF32M1>;
  def : VTypeInfoToWide<VBF16M1, VF32M2>;
  def : VTypeInfoToWide<VBF16M2, VF32M4>;
  def : VTypeInfoToWide<VBF16M4, VF32M8>;
}
518
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVInstrInfo.h.
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  // Base MC instruction, recovered from the pseudo's name.
  Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
  // SEW = 0 is used to denote that the Pseudo is not SEW specific (or unknown).
  bits<8> SEW = 0;
  // Set to 0 on pseudos that must be kept out of RISCVVPseudosTable.
  bit NeedBeInPseudoTable = 1;
}

// The actual table: maps a pseudo opcode to its base instruction.
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let FilterClassField = "NeedBeInPseudoTable";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
  let PrimaryKeyEarlyOut = true;
}

// Inverse lookup: from (BaseInstr, VLMul, SEW) back to the pseudo.  VLMul
// is a field of the pseudo instruction itself, not of RISCVVPseudo.
def RISCVVInversePseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr", "VLMul", "SEW"];
  let PrimaryKey = [ "BaseInstr", "VLMul", "SEW"];
  let PrimaryKeyName = "getBaseInfo";
  let PrimaryKeyEarlyOut = true;
}

// Per-intrinsic info (scalar operand index, VL operand index) keyed by
// intrinsic ID.
def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
558
// Describes the relation of a masked pseudo to the unmasked variants.
//    Note that all masked variants (in this table) have exactly one
//    unmasked variant.  For all but compares, both the masked and
//    unmasked variant have a passthru and policy operand.  For compares,
//    neither has a policy op, and only the masked version has a passthru.
class RISCVMaskedPseudo<bits<4> MaskIdx, bit ActiveAffectsRes=false> {
  Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
  // The unmasked variant is found by stripping "_MASK" from the name.
  Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
  bits<4> MaskOpIdx = MaskIdx; // Operand index of the mask operand.
  bit ActiveElementsAffectResult = ActiveAffectsRes;
}

def RISCVMaskedPseudosTable : GenericTable {
  let FilterClass = "RISCVMaskedPseudo";
  let CppTypeName = "RISCVMaskedPseudoInfo";
  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "MaskOpIdx", "ActiveElementsAffectResult"];
  let PrimaryKey = ["MaskedPseudo"];
  let PrimaryKeyName = "getMaskedPseudoInfo";
}
578
// Key record for unit-stride / strided / fault-only-first vector loads.
class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;        // Fault-only-first.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Secondary index into RISCVMaskedPseudosTable: look up the masked variant
// from the unmasked pseudo.
def lookupMaskedIntrinsicByUnmasked : SearchIndex {
  let Table = RISCVMaskedPseudosTable;
  let Key = ["UnmaskedPseudo"];
}

def RISCVVLETable : GenericTable {
  let FilterClass = "RISCVVLE";
  let CppTypeName = "VLEPseudo";
  let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLEPseudo";
}
600
// Key record for unit-stride / strided vector stores (no FF bit: stores
// have no fault-only-first form).
class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSETable : GenericTable {
  let FilterClass = "RISCVVSE";
  let CppTypeName = "VSEPseudo";
  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSEPseudo";
}
616
// Shared key record for indexed loads (VLX) and indexed stores (VSX).
class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<1> Masked = M;
  bits<1> Ordered = O;    // Ordered vs unordered indexed access.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;       // LMUL of the data operand.
  bits<3> IndexLMUL = IL; // LMUL of the index operand.
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;

// Common table layout; the two defs below only differ in filter class and
// accessor name.
class RISCVVLX_VSXTable : GenericTable {
  let CppTypeName = "VLX_VSXPseudo";
  let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}

def RISCVVLXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVLX";
  let PrimaryKeyName = "getVLXPseudo";
}

def RISCVVSXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVSX";
  let PrimaryKeyName = "getVSXPseudo";
}
646
// Key record for segment loads: like RISCVVLE plus the NF (number of
// fields) of the segment tuple.
class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;   // Fault-only-first.
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLSEGTable : GenericTable {
  let FilterClass = "RISCVVLSEG";
  let CppTypeName = "VLSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVLSEGPseudo";
}

// Key record for indexed segment loads.
class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;       // LMUL of the data operand.
  bits<3> IndexLMUL = IL; // LMUL of the index operand.
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLXSEGTable : GenericTable {
  let FilterClass = "RISCVVLXSEG";
  let CppTypeName = "VLXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVLXSEGPseudo";
}
682
// Key record for segment stores (no FF bit, as with RISCVVSE).
class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSSEGTable : GenericTable {
  let FilterClass = "RISCVVSSEG";
  let CppTypeName = "VSSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
  let PrimaryKeyName = "getVSSEGPseudo";
}

// Key record for indexed segment stores.
class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<3> Log2SEW = S;
  bits<3> LMUL = L;       // LMUL of the data operand.
  bits<3> IndexLMUL = IL; // LMUL of the index operand.
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSXSEGTable : GenericTable {
  let FilterClass = "RISCVVSXSEG";
  let CppTypeName = "VSXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVSXSEGPseudo";
}
717
718//===----------------------------------------------------------------------===//
719// Helpers to define the different pseudo instructions.
720//===----------------------------------------------------------------------===//
721
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
// Maps a register class to its "no v0" variant; classes with no such variant
// (e.g. the mask cases above) pass through unchanged via the default arm.
class GetVRegNoV0<VReg VRegClass> {
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
                 true : VRegClass);
}
744
// Generic vector pseudo: records the real base instruction plus the VLMul/SEW
// it was instantiated for, with caller-provided operand lists.
class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins, int sew = 0> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
  let SEW = sew;  // 0 means "SEW-agnostic" by convention in this file.
}
751
// Computes the subtarget predicate list required to select patterns for a
// given VTypeInfo.  Note !cond takes the first matching arm, so the scalar
// FP checks must precede the generic SEW==64 check (f64 needs
// HasVInstructionsF64, not merely HasVInstructionsI64).
class GetVTypePredicates<VTypeInfo vti> {
  list<Predicate> Predicates = !cond(!eq(vti.Scalar, f16) : [HasVInstructionsF16],
                                     !eq(vti.Scalar, bf16) : [HasVInstructionsBF16],
                                     !eq(vti.Scalar, f32) : [HasVInstructionsAnyF],
                                     !eq(vti.Scalar, f64) : [HasVInstructionsF64],
                                     !eq(vti.SEW, 64) : [HasVInstructionsI64],
                                     true : [HasVInstructions]);
}
760
// Unit-stride load, unmasked.  $dest is the tied passthru operand; the
// trailing vl/sew/policy operands carry the VTYPE state consumed by the
// vsetvli insertion pass.
class VPseudoUSLoadNoMask<VReg RetClass,
                          int EEW> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
776
// Unit-stride load, masked.  Destination and merge operand use the NoV0
// class so the result cannot be allocated in v0 (which holds the mask).
class VPseudoUSLoadMask<VReg RetClass,
                        int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
794
// Unit-stride fault-only-first load, unmasked.  Produces an extra GPR output
// ($vl) for the vl value written back by vleNff; the requested AVL comes in
// as $avl.
class VPseudoUSLoadFFNoMask<VReg RetClass,
                            int EEW> :
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
810
// Unit-stride fault-only-first load, masked.  Like VPseudoUSLoadFFNoMask but
// with a v0 mask operand and a NoV0-constrained destination.
class VPseudoUSLoadFFMask<VReg RetClass,
                          int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
828
// Strided load, unmasked.  $rs2 carries the byte stride.
class VPseudoSLoadNoMask<VReg RetClass,
                         int EEW> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
844
// Strided load, masked.  $rs2 carries the byte stride; destination is
// restricted away from v0.
class VPseudoSLoadMask<VReg RetClass,
                       int EEW> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPRMem:$rs1, GPR:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
862
// Indexed load, unmasked.  LMUL is the index operand's register-group
// multiplier (data LMUL comes from VLMul).  EarlyClobber requests
// @earlyclobber on $rd so the result cannot overlap the index source when
// their EEWs differ.
class VPseudoILoadNoMask<VReg RetClass,
                         VReg IdxClass,
                         int EEW,
                         bits<3> LMUL,
                         bit Ordered,
                         bit EarlyClobber,
                         int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
  let TargetOverlapConstraintType = TargetConstraintType;
}
884
// Indexed load, masked.  Same operand layout as VPseudoILoadNoMask plus a v0
// mask operand; destination restricted away from v0.
class VPseudoILoadMask<VReg RetClass,
                       VReg IdxClass,
                       int EEW,
                       bits<3> LMUL,
                       bit Ordered,
                       bit EarlyClobber,
                       int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPRMem:$rs1, IdxClass:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
908
// Unit-stride store, unmasked.  $rd here is the stored data source (stores
// have no outputs), matching the real instruction's operand naming.
class VPseudoUSStoreNoMask<VReg StClass,
                           int EEW> :
      Pseudo<(outs),
             (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
921
// Unit-stride store, masked.  Stores have no policy operands: inactive
// elements in memory are never written.
class VPseudoUSStoreMask<VReg StClass,
                         int EEW> :
      Pseudo<(outs),
             (ins StClass:$rd, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
935
// Strided store, unmasked.  $rs2 carries the byte stride.
class VPseudoSStoreNoMask<VReg StClass,
                          int EEW> :
      Pseudo<(outs),
             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
                  AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
949
// Strided store, masked.  $rs2 carries the byte stride.
class VPseudoSStoreMask<VReg StClass,
                        int EEW> :
      Pseudo<(outs),
             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
963
// Nullary (no vector source) operation, unmasked, with tied merge operand
// and tail policy.
class VPseudoNullaryNoMask<VReg RegClass> :
      Pseudo<(outs RegClass:$rd),
             (ins RegClass:$merge,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
977
// Nullary (no vector source) operation, masked.  Destination/merge use the
// NoV0 class since v0 holds the mask.
class VPseudoNullaryMask<VReg RegClass> :
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  // Note: was "Constraints ="$rd..." (missing space); normalized to match
  // every sibling class.  The constraint string itself is unchanged.
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let UsesMaskPolicy = 1;
  let HasVecPolicyOp = 1;
}
992
993// Nullary for pseudo instructions. They are expanded in
994// RISCVExpandPseudoInsts pass.
// Mask-register nullary pseudo (e.g. VMSET/VMCLR forms) expanded late by
// RISCVExpandPseudoInsts rather than by the usual pseudo lowering.
class VPseudoNullaryPseudoM<string BaseInst> :
      Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
  // Just fill a corresponding real v-inst to pass tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
  // We exclude them from RISCVVPseudoTable.
  let NeedBeInPseudoTable = 0;
}
1009
// Unary operation, unmasked, with tied merge operand and policy.  Extra
// register constraints (e.g. earlyclobber for widening ops) are prepended
// via Constraint.
class VPseudoUnaryNoMask<DAGOperand RetClass,
                         DAGOperand OpClass,
                         string Constraint = "",
                         int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, OpClass:$rs2,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
1027
// Unary operation, unmasked, without merge or policy operands (result fully
// defined by the operation, e.g. whole-register forms).
class VPseudoUnaryNoMaskNoPolicy<DAGOperand RetClass,
                                 DAGOperand OpClass,
                                 string Constraint = "",
                                 int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1043
// Unary operation, unmasked, carrying an explicit rounding-mode operand
// ($rm).  UsesVXRM = 0 marks this as an FP (frm) rather than fixed-point
// (vxrm) rounding mode.
class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
                                     DAGOperand OpClass,
                                     string Constraint = "",
                                     int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1063
// Unary operation, masked, with tied merge operand, policy, and NoV0
// destination.
class VPseudoUnaryMask<VReg RetClass,
                       VReg OpClass,
                       string Constraint = "",
                       int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1082
// Unary operation, masked, with an explicit FP rounding-mode operand
// (UsesVXRM = 0 selects frm semantics).
class VPseudoUnaryMaskRoundingMode<VReg RetClass,
                                   VReg OpClass,
                                   string Constraint = "",
                                   int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                  VMaskOp:$vm, ixlenimm:$rm,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1104
// Masked unary pseudo lowered through a custom inserter
// (usesCustomInserter = 1).  Deliberately does NOT inherit RISCVVPseudo,
// so it is absent from the pseudo lookup table.  The _NoExcept suffix
// presumably means no FP exception is raised — confirm against the users
// of this class.
class VPseudoUnaryMask_NoExcept<VReg RetClass,
                                VReg OpClass,
                                string Constraint = ""> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let usesCustomInserter = 1;
}
1121
// Unary operation, unmasked, with an explicit frm operand ($frm).  Unlike
// the RoundingMode variants above, UsesVXRM is left at its default here.
class VPseudoUnaryNoMask_FRM<VReg RetClass,
                             VReg OpClass,
                             string Constraint = "",
                             int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
}
1140
// Masked counterpart of VPseudoUnaryNoMask_FRM: adds the v0 mask operand and
// NoV0 destination constraint.
class VPseudoUnaryMask_FRM<VReg RetClass,
                           VReg OpClass,
                           string Constraint = "",
                           int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                  VMaskOp:$vm, ixlenimm:$frm,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let HasRoundModeOp = 1;
}
1161
// Unary operation producing a scalar GPR result (e.g. vcpop/vfirst style),
// unmasked.  No merge/policy: a scalar result has no tail.
class VPseudoUnaryNoMaskGPROut :
      Pseudo<(outs GPR:$rd),
             (ins VR:$rs2, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1172
// Masked unary operation producing a scalar GPR result.  Note the source is
// $rs1 here (vs. $rs2 in the unmasked form), mirroring the real encodings.
class VPseudoUnaryMaskGPROut :
      Pseudo<(outs GPR:$rd),
             (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1183
1184// Mask can be V0~V31
// Unary operation whose mask may live in any vector register (VR:$vm), not
// just v0 (e.g. vcompress).  Result must not overlap the sources, hence the
// earlyclobber in addition to the tie.
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, Op1Class:$rs2,
                  VR:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1198
// Binary operation, unmasked, without merge or policy operands.
class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint,
                          int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1215
// Binary operation, unmasked, with tied merge operand and tail policy.
class VPseudoBinaryNoMaskPolicy<VReg RetClass,
                                VReg Op1Class,
                                DAGOperand Op2Class,
                                string Constraint,
                                int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
1234
// Binary operation, unmasked, with a rounding-mode operand ($rm).
// UsesVXRM_ selects fixed-point (vxrm, 1) vs FP (frm, 0) semantics.
class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
                                      VReg Op1Class,
                                      DAGOperand Op2Class,
                                      string Constraint,
                                      int UsesVXRM_ = 1,
                                      int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  // Added for consistency with every sibling pseudo class: the rounding mode
  // is modeled as an explicit operand, so the pseudo itself has no
  // unmodeled side effects.  Leaving the flag unset made it conservatively
  // side-effecting, inhibiting DCE/scheduling.
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = UsesVXRM_;
}
1255
// Binary operation, masked, with policy and a rounding-mode operand.
// UsesVXRM_ selects fixed-point (vxrm, 1) vs FP (frm, 0) semantics.
class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
                                          RegisterClass Op1Class,
                                          DAGOperand Op2Class,
                                          string Constraint,
                                          int UsesVXRM_,
                                          int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  Op1Class:$rs2, Op2Class:$rs1,
                  VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  // Added for consistency with every sibling pseudo class: the rounding mode
  // is an explicit operand, so no unmodeled side effects remain.
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = UsesVXRM_;
}
1279
1280// Special version of VPseudoBinaryNoMask where we pretend the first source is
1281// tied to the destination.
1282// This allows maskedoff and rs2 to be the same register.
// Binary operation whose first source ($rs2) doubles as the merge value by
// being tied to $rd (see comment above the class).  IsTiedPseudo lets later
// passes recognize the convention.
class VPseudoTiedBinaryNoMask<VReg RetClass,
                              DAGOperand Op2Class,
                              string Constraint,
                              int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let isConvertibleToThreeAddress = 1;
  let IsTiedPseudo = 1;
}
1302
// Tied-binary variant with an explicit FP rounding-mode operand
// (UsesVXRM = 0 selects frm semantics).
class VPseudoTiedBinaryNoMaskRoundingMode<VReg RetClass,
                                          DAGOperand Op2Class,
                                          string Constraint,
                                          int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$rs2, Op2Class:$rs1,
                  ixlenimm:$rm,
                  AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let isConvertibleToThreeAddress = 1;
  let IsTiedPseudo = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1326
// Indexed store, unmasked.  LMUL is the index operand's register-group
// multiplier; Ordered distinguishes vsoxei (1) from vsuxei (0).
class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                          bit Ordered>:
      Pseudo<(outs),
             (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
                  ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1340
// Indexed store, masked.  Same layout as VPseudoIStoreNoMask plus the v0
// mask operand.
class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
                        bit Ordered>:
      Pseudo<(outs),
             (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1354
// Binary operation, masked, with tied merge operand, policy, and NoV0
// destination.
class VPseudoBinaryMaskPolicy<VReg RetClass,
                              RegisterClass Op1Class,
                              DAGOperand Op2Class,
                              string Constraint,
                              int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  Op1Class:$rs2, Op2Class:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1375
// Ternary operation (merge acts as the third source, e.g. FMA-style),
// masked, with policy.  No extra Constraint parameter: only the tie applies.
class VPseudoTernaryMaskPolicy<VReg RetClass,
                               RegisterClass Op1Class,
                               DAGOperand Op2Class> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  Op1Class:$rs2, Op2Class:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
1392
// Masked ternary operation with an explicit FP rounding-mode operand
// (UsesVXRM = 0 selects frm semantics).
class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
                                           RegisterClass Op1Class,
                                           DAGOperand Op2Class> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  Op1Class:$rs2, Op2Class:$rs1,
                  VMaskOp:$vm,
                  ixlenimm:$rm,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1413
1414// Like VPseudoBinaryMaskPolicy, but output can be V0 and there is no policy.
// Masked binary op producing a mask result: destination MAY be v0 (plain
// RetClass, no NoV0) and there is no policy operand (see comment above).
class VPseudoBinaryMOutMask<VReg RetClass,
                            RegisterClass Op1Class,
                            DAGOperand Op2Class,
                            string Constraint,
                            int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2, Op2Class:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let UsesMaskPolicy = 1;
}
1434
1435// Special version of VPseudoBinaryMaskPolicy where we pretend the first source
1436// is tied to the destination so we can workaround the earlyclobber constraint.
1437// This allows maskedoff and rs2 to be the same register.
// Masked tied-binary op: only $merge and $rs1 are explicit; the first
// source is implicitly the merge value (see comment above the class).
class VPseudoTiedBinaryMask<VReg RetClass,
                            DAGOperand Op2Class,
                            string Constraint,
                            int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  Op2Class:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let IsTiedPseudo = 1;
}
1458
// Masked tied-binary op with an explicit FP rounding-mode operand
// (UsesVXRM = 0 selects frm semantics).
class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
                                        DAGOperand Op2Class,
                                        string Constraint,
                                        int TargetConstraintType = 1> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  Op2Class:$rs1,
                  VMaskOp:$vm,
                  ixlenimm:$rm,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let IsTiedPseudo = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1483
// Add/sub-with-carry style binary op (vadc/vmadc/vsbc/vmsbc family).
// CarryIn selects whether a carry operand (always v0, hence VMV0) is
// appended to the input list.  No merge operand in either form.
class VPseudoBinaryCarry<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
                         LMULInfo MInfo,
                         bit CarryIn,
                         string Constraint,
                         int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             !if(CarryIn,
                (ins Op1Class:$rs2, Op2Class:$rs1,
                     VMV0:$carry, AVL:$vl, ixlenimm:$sew),
                (ins Op1Class:$rs2, Op2Class:$rs1,
                     AVL:$vl, ixlenimm:$sew)), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = Constraint;
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let VLMul = MInfo.value;
}
1507
// Carry-in binary op with a tied merge operand.  HasVecPolicyOp is
// explicitly 0: these ops take no policy operand.
class VPseudoTiedBinaryCarryIn<VReg RetClass,
                               VReg Op1Class,
                               DAGOperand Op2Class,
                               LMULInfo MInfo,
                               int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
                  VMV0:$carry, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 0;
  let VLMul = MInfo.value;
}
1527
// Ternary operation, unmasked, without policy.  $rs3 is both the third
// source and the tied destination (FMA-style accumulator).
class VPseudoTernaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                  AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1543
// Ternary operation, unmasked, with a tail-policy operand.  $rs3 is the tied
// accumulator source.
class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
                                     string Constraint = "",
                                     int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1562
// Unmasked ternary operation with policy and an explicit FP rounding-mode
// operand (UsesVXRM = 0 selects frm semantics).
class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
                                                 RegisterClass Op1Class,
                                                 DAGOperand Op2Class,
                                                 string Constraint = "",
                                                 int TargetConstraintType = 1> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                  ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let TargetOverlapConstraintType = TargetConstraintType;
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
}
1583
// Unmasked unit-stride segment load.  $rd is tied to the $dest passthru
// operand; VL, SEW and policy are carried as pseudo operands.
class VPseudoUSSegLoadNoMask<VReg RetClass,
                             int EEW,
                             bits<4> NF> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
1600
// Masked unit-stride segment load.  The destination uses GetVRegNoV0 because
// the mask occupies V0, so $rd may not be allocated to V0.
class VPseudoUSSegLoadMask<VReg RetClass,
                           int EEW,
                           bits<4> NF> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1618
// Unmasked fault-only-first unit-stride segment load.  In addition to the
// vector result it defines GPR:$vl, the (possibly reduced) VL produced by
// the fault-only-first semantics; the requested AVL comes in as $avl.
class VPseudoUSSegLoadFFNoMask<VReg RetClass,
                               int EEW,
                               bits<4> NF> :
      Pseudo<(outs RetClass:$rd, GPR:$vl),
             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $dest";
}
1635
// Masked fault-only-first unit-stride segment load.  Defines GPR:$vl (the
// resulting VL) alongside the vector result; $rd avoids V0 since the mask
// lives there.
class VPseudoUSSegLoadFFMask<VReg RetClass,
                             int EEW,
                             bits<4> NF> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1653
// Unmasked strided segment load.  GPR:$offset supplies the byte stride
// between consecutive segments.
class VPseudoSSegLoadNoMask<VReg RetClass,
                            int EEW,
                            bits<4> NF> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl,
             ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let Constraints = "$rd = $merge";
}
1670
// Masked strided segment load.  $rd avoids V0 (mask register); tied to the
// $merge passthru.
class VPseudoSSegLoadMask<VReg RetClass,
                          int EEW,
                          bits<4> NF> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1689
// Unmasked indexed segment load.  IdxClass:$offset holds the index vector;
// LMUL is the index operand's register-group size (the data side uses VLMul).
class VPseudoISegLoadNoMask<VReg RetClass,
                            VReg IdxClass,
                            int EEW,
                            bits<3> LMUL,
                            bits<4> NF,
                            bit Ordered> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
}
1711
// Masked indexed segment load.  Same earlyclobber requirement as the
// unmasked form; $rd additionally avoids V0 because the mask lives there.
class VPseudoISegLoadMask<VReg RetClass,
                          VReg IdxClass,
                          int EEW,
                          bits<3> LMUL,
                          bits<4> NF,
                          bit Ordered> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
}
1735
// Unmasked unit-stride segment store.  Stores produce no vector result, so
// there is no passthru/policy operand; $rd here is the stored data source.
class VPseudoUSSegStoreNoMask<VReg ValClass,
                              int EEW,
                              bits<4> NF> :
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1749
// Masked unit-stride segment store ($vm selects the active elements).
class VPseudoUSSegStoreMask<VReg ValClass,
                            int EEW,
                            bits<4> NF> :
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1764
// Unmasked strided segment store.  GPR:$offset is the byte stride between
// consecutive segments.
class VPseudoSSegStoreNoMask<VReg ValClass,
                             int EEW,
                             bits<4> NF> :
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset,
                  AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1779
// Masked strided segment store.  GPR:$offset is the byte stride between
// consecutive segments; $vm selects the active elements.
class VPseudoSSegStoreMask<VReg ValClass,
                           int EEW,
                           bits<4> NF> :
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1794
// Unmasked indexed segment store.  IdxClass:$index supplies the index
// vector; LMUL is the index operand's register-group size.
class VPseudoISegStoreNoMask<VReg ValClass,
                             VReg IdxClass,
                             int EEW,
                             bits<3> LMUL,
                             bits<4> NF,
                             bit Ordered> :
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, IdxClass:$index,
                  AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1812
// Masked indexed segment store.  IdxClass:$index supplies the index vector;
// $vm selects the active elements.
class VPseudoISegStoreMask<VReg ValClass,
                           VReg IdxClass,
                           int EEW,
                           bits<3> LMUL,
                           bits<4> NF,
                           bit Ordered> :
      Pseudo<(outs),
             (ins ValClass:$rd, GPRMem:$rs1, IdxClass:$index,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let HasVLOp = 1;
  let HasSEWOp = 1;
}
1830
// Instantiates unmasked ("E<eew>_V_<lmul>") and masked ("..._MASK")
// unit-stride load pseudos for every legal (EEW, LMUL) combination, attaching
// VLE scheduling info.
multiclass VPseudoUSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo :
          VPseudoUSLoadNoMask<vreg, eew>,
          VLESched<LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" :
          VPseudoUSLoadMask<vreg, eew>,
          RISCVMaskedPseudo<MaskIdx=2>,
          VLESched<LInfo>;
      }
    }
  }
}
1848
// Instantiates fault-only-first load pseudos ("E<eew>FF_V_<lmul>" and the
// masked "_MASK" variant) for every legal (EEW, LMUL) combination.
multiclass VPseudoFFLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "FF_V_" # LInfo:
          VPseudoUSLoadFFNoMask<vreg, eew>,
          VLFSched<LInfo>;
        def "E" # eew # "FF_V_" # LInfo # "_MASK":
          VPseudoUSLoadFFMask<vreg, eew>,
          RISCVMaskedPseudo<MaskIdx=2>,
          VLFSched<LInfo>;
      }
    }
  }
}
1866
// Instantiates mask-register load pseudos (one per mask type), modeled as a
// unit-stride load of VR with EEW=1.
multiclass VPseudoLoadMask {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVLDM_MX = !cast<SchedWrite>("WriteVLDM_" # mx);
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, EEW=1>,
        Sched<[WriteVLDM_MX, ReadVLDX]>;
    }
  }
}
1877
// Instantiates strided load pseudos (unmasked and masked) for every legal
// (EEW, LMUL) combination, with VLS scheduling info.
multiclass VPseudoSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
                                        VLSSched<eew, LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" :
          VPseudoSLoadMask<vreg, eew>,
          RISCVMaskedPseudo<MaskIdx=3>,
          VLSSched<eew, LInfo>;
      }
    }
  }
}
1894
// Instantiates indexed load pseudos ("EI<idxEEW>_V_<idxLMUL>_<dataLMUL>" and
// "_MASK") for every (index EEW, data EEW, data LMUL) combination whose
// derived index EMUL is legal (1/8 <= EMUL <= 8, tracked in octuples).
multiclass VPseudoILoad<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple =
          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          // Destination and index overlap rules only need a constraint when
          // the data and index EEWs differ.
          defvar HasConstraint = !ne(dataEEW, idxEEW);
          defvar TypeConstraints =
            !if(!eq(dataEEW, idxEEW), 1, !if(!gt(dataEEW, idxEEW), !if(!ge(idxEMULOctuple, 8), 3, 1), 2));
          let VLMul = dataEMUL.value in {
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
              VPseudoILoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint, TypeConstraints>,
              VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
              VPseudoILoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint, TypeConstraints>,
              RISCVMaskedPseudo<MaskIdx=3>,
              VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
          }
        }
      }
    }
  }
}
1926
// Instantiates unmasked and masked unit-stride store pseudos for every legal
// (EEW, LMUL) combination, with VSE scheduling info.
multiclass VPseudoUSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
                                        VSESched<LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
                                                  VSESched<LInfo>;
      }
    }
  }
}
1941
// Instantiates mask-register store pseudos (one per mask type), modeled as a
// unit-stride store of VR with EEW=1.
multiclass VPseudoStoreMask {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    defvar WriteVSTM_MX = !cast<SchedWrite>("WriteVSTM_" # mx);
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, EEW=1>,
        Sched<[WriteVSTM_MX, ReadVSTX]>;
    }
  }
}
1952
// Instantiates unmasked and masked strided store pseudos for every legal
// (EEW, LMUL) combination, with VSS scheduling info.
multiclass VPseudoSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value, SEW=eew in {
        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
                                        VSSSched<eew, LInfo>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
                                                  VSSSched<eew, LInfo>;
      }
    }
  }
}
1967
// Instantiates indexed store pseudos for every (index EEW, data EEW,
// data LMUL) combination whose derived index EMUL is legal (tracked in
// octuples, i.e. 1..64 == EMUL 1/8..8).
multiclass VPseudoIStore<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple =
          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          let VLMul = dataEMUL.value in {
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
              VPseudoIStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
              VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
              VPseudoIStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
              VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
          }
        }
      }
    }
  }
}
1995
// Instantiates mask-population-count pseudos (GPR result) for every mask
// type, unmasked and masked.
multiclass VPseudoVPOP_M {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMaskGPROut,
          SchedBinary<"WriteVMPopV", "ReadVMPopV", "ReadVMPopV", mx>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
          SchedBinary<"WriteVMPopV", "ReadVMPopV", "ReadVMPopV", mx>;
    }
  }
}
2007
// Instantiates find-first-set-mask-bit pseudos (GPR result, VMFFS
// scheduling) for every mask type, unmasked and masked.
multiclass VPseudoV1ST_M {
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    let VLMul = mti.LMul.value in {
      def "_M_" #mti.BX : VPseudoUnaryNoMaskGPROut,
          SchedBinary<"WriteVMFFSV", "ReadVMFFSV", "ReadVMFFSV", mx>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
          SchedBinary<"WriteVMFFSV", "ReadVMFFSV", "ReadVMFFSV", mx>;
    }
  }
}
2019
// Instantiates mask set-before/first/including-first pseudos.  The result
// may not overlap the source mask, hence the earlyclobber constraint; the
// masked form forces a tail-agnostic policy.
multiclass VPseudoVSFS_M {
  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in {
    defvar mx = mti.LMul.MX;
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMaskNoPolicy<VR, VR, constraint>,
                           SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
                                      forceMergeOpRead=true>;
      let ForceTailAgnostic = true in
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
                                     SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
                                                forceMergeOpRead=true>;
    }
  }
}
2035
// Instantiates vector-index (vid.v-style, nullary) pseudos for every LMUL,
// unmasked and masked.
multiclass VPseudoVID_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    let VLMul = m.value in {
      def "_V_" # mx : VPseudoNullaryNoMask<m.vrclass>,
                         SchedNullary<"WriteVIdxV", mx, forceMergeOpRead=true>;
      def "_V_" # mx # "_MASK" : VPseudoNullaryMask<m.vrclass>,
                                   RISCVMaskedPseudo<MaskIdx=1>,
                                   SchedNullary<"WriteVIdxV", mx,
                                                forceMergeOpRead=true>;
    }
  }
}
2049
// Instantiates nullary mask pseudos that lower to BaseInst # "_MM", one per
// mask type, with VMALU scheduling info.
multiclass VPseudoNullaryPseudoM <string BaseInst> {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">,
        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mti.LMul.MX>;
    }
  }
}
2058
// Instantiates viota pseudos for every LMUL.  The destination cannot overlap
// the mask source (earlyclobber); the masked form is tagged
// ActiveAffectsRes=true because inactive/active lane selection changes the
// computed values, not just which lanes are written.
multiclass VPseudoVIOTA_M {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
                       SchedUnary<"WriteVIotaV", "ReadVIotaV", mx,
                                  forceMergeOpRead=true>;
      def "_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
                                 RISCVMaskedPseudo<MaskIdx=2, ActiveAffectsRes=true>,
                                 SchedUnary<"WriteVIotaV", "ReadVIotaV", mx,
                                            forceMergeOpRead=true>;
    }
  }
}
2074
// Instantiates vcompress pseudos, one per (LMUL, SEW) pair, since the
// compress scheduling model is SEW-dependent.
multiclass VPseudoVCPR_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar sews = SchedSEWSet<mx>.val;
    let VLMul = m.value in
      foreach e = sews in {
        defvar suffix = "_" # m.MX # "_E" # e;
        let SEW = e in
        def _VM # suffix
          : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
            SchedBinary<"WriteVCompressV", "ReadVCompressV", "ReadVCompressV",
                        mx, e>;
      }
  }
}
2090
// Core binary pseudo builder: defines an unmasked policy pseudo and a masked
// "_MASK" pseudo.  When sew != 0 the pseudo name also encodes the SEW
// ("_<MX>_E<sew>"), otherwise just the LMUL ("_<MX>").
multiclass VPseudoBinary<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
                         LMULInfo MInfo,
                         string Constraint = "",
                         int sew = 0,
                         int TargetConstraintType = 1,
                         bit Commutable = 0> {
  let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    def suffix : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
                                           Constraint, TargetConstraintType>;
    def suffix # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
                                                   Constraint, TargetConstraintType>,
                           RISCVMaskedPseudo<MaskIdx=3>;
  }
}
2108
// Like VPseudoBinary, but the pseudos carry an explicit rounding-mode
// operand.  UsesVXRM selects fixed-point VXRM (1) vs floating-point FRM (0).
multiclass VPseudoBinaryRoundingMode<VReg RetClass,
                                     VReg Op1Class,
                                     DAGOperand Op2Class,
                                     LMULInfo MInfo,
                                     string Constraint = "",
                                     int sew = 0,
                                     int UsesVXRM = 1,
                                     int TargetConstraintType = 1,
                                     bit Commutable = 0> {
  let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
                                                 Constraint, UsesVXRM,
                                                 TargetConstraintType>;
    def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode<RetClass,
                                                               Op1Class,
                                                               Op2Class,
                                                               Constraint,
                                                               UsesVXRM,
                                                               TargetConstraintType>,
                           RISCVMaskedPseudo<MaskIdx=3>;
  }
}
2132
2133
// Binary pseudos producing a mask result (e.g. compares).  The masked form
// forces a tail-agnostic policy, since mask-producing instructions always
// treat the tail as agnostic.
multiclass VPseudoBinaryM<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          LMULInfo MInfo,
                          string Constraint = "",
                          int TargetConstraintType = 1,
                          bit Commutable = 0> {
  let VLMul = MInfo.value, isCommutable = Commutable in {
    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                             Constraint, TargetConstraintType>;
    let ForceTailAgnostic = true in
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
                                                         Op2Class, Constraint, TargetConstraintType>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}
2150
// Binary pseudo builder for instructions whose second operand uses a
// different EMUL than the destination (e.g. index operands); both LMULs are
// encoded in the pseudo name.
multiclass VPseudoBinaryEmul<VReg RetClass,
                             VReg Op1Class,
                             DAGOperand Op2Class,
                             LMULInfo lmul,
                             LMULInfo emul,
                             string Constraint = "",
                             int sew> {
  let VLMul = lmul.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
    def suffix # "_" # emul.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
                                                           Constraint>;
    def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
                                                                          Constraint>,
                                                  RISCVMaskedPseudo<MaskIdx=3>;
  }
}
2167
// Binary pseudos whose first source is tied to the destination ("_TIED"
// variants), unmasked and masked.
multiclass VPseudoTiedBinary<VReg RetClass,
                             DAGOperand Op2Class,
                             LMULInfo MInfo,
                             string Constraint = "",
                             int TargetConstraintType = 1> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
                                                          Constraint, TargetConstraintType>;
    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
                                                         Constraint, TargetConstraintType>,
                                        RISCVMaskedPseudo<MaskIdx=2>;
  }
}
2181
// Tied binary pseudos (first source tied to the destination) carrying an
// explicit rounding-mode operand; an optional SEW suffix is appended when
// sew != 0, matching VPseudoBinaryRoundingMode's naming scheme.
multiclass VPseudoTiedBinaryRoundingMode<VReg RetClass,
                                         DAGOperand Op2Class,
                                         LMULInfo MInfo,
                                         string Constraint = "",
                                         int sew = 0,
                                         int TargetConstraintType = 1> {
  defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
  let VLMul = MInfo.value in {
    def suffix # "_TIED":
      VPseudoTiedBinaryNoMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>;
    def suffix # "_MASK_TIED" :
      VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>,
      RISCVMaskedPseudo<MaskIdx=2>;
  }
}
2197
2198
// Vector-vector binary pseudos ("_VV" suffix).
multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0, bit Commutable = 0> {
  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew, Commutable=Commutable>;
}
2202
// Vector-vector binary pseudos with a rounding-mode operand (defaults to
// VXRM, i.e. fixed-point).
multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = "", bit Commutable = 0> {
  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m, Constraint,
                                       Commutable=Commutable>;
}
2207
// Floating-point vector-vector binary pseudos: SEW-suffixed and using the
// FRM rounding mode (UsesVXRM=0).
multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, int sew> {
  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
                                       "", sew, UsesVXRM=0>;
}
2212
// Instantiates vrgatherei16 pseudos.  The EEW=16 index operand requires its
// own EMUL (lmul * 16 / sew, computed in octuples); combinations whose index
// EMUL falls outside 1/8..8 are skipped.  Earlyclobber: the destination may
// not overlap either source.
multiclass VPseudoVGTR_EI16_VV {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    foreach sew = EEWList in {
      defvar dataEMULOctuple = m.octuple;
      // emul = lmul * 16 / sew
      defvar idxEMULOctuple = !srl(!mul(dataEMULOctuple, 16), !logtwo(sew));
      if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
        defvar emulMX = octuple_to_str<idxEMULOctuple>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        defvar sews = SchedSEWSet<mx>.val;
        foreach e = sews in {
          defm _VV
              : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul,
                                  constraint, e>,
                SchedBinary<"WriteVRGatherEI16VV", "ReadVRGatherEI16VV_data",
                            "ReadVRGatherEI16VV_index", mx, e, forceMergeOpRead=true>;
        }
      }
    }
  }
}
2236
// Vector-scalar (GPR) binary pseudos ("_VX" suffix).
multiclass VPseudoBinaryV_VX<LMULInfo m, string Constraint = "", int sew = 0> {
  defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint, sew>;
}
2240
// Vector-scalar (GPR) binary pseudos with a rounding-mode operand.
multiclass VPseudoBinaryV_VX_RM<LMULInfo m, string Constraint = ""> {
  defm "_VX" : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
2244
// Instantiates vslide1up/down-style vector-scalar pseudos for every LMUL,
// with integer slide scheduling info.
multiclass VPseudoVSLD1_VX<string Constraint = ""> {
  foreach m = MxList in {
    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
                 SchedBinary<"WriteVISlide1X", "ReadVISlideV", "ReadVISlideX",
                             m.MX, forceMergeOpRead=true>;
  }
}
2252
// Vector-FP-scalar binary pseudos; the FP register width is encoded in the
// "_V<FX>" suffix.
multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, int sew> {
  defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
                                   f.fprclass, m, "", sew>;
}
2257
// Vector-FP-scalar binary pseudos using the FRM rounding mode (UsesVXRM=0).
multiclass VPseudoBinaryV_VF_RM<LMULInfo m, FPR_Info f, int sew> {
  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass,
                                               f.fprclass, m, "", sew,
                                               UsesVXRM=0>;
}
2263
// Instantiates vfslide1up/down-style FP-scalar pseudos across each FP type's
// legal LMULs, with FP slide scheduling info.
multiclass VPseudoVSLD1_VF<string Constraint = ""> {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "_V" #f.FX
          : VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
            SchedBinary<"WriteVFSlide1F", "ReadVFSlideV", "ReadVFSlideF", m.MX,
                      forceMergeOpRead=true>;
    }
  }
}
2274
// Vector-immediate binary pseudos ("_VI" suffix).
multiclass VPseudoBinaryV_VI<Operand ImmType, LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
2278
// Vector-immediate binary pseudos with a rounding-mode operand.
multiclass VPseudoBinaryV_VI_RM<Operand ImmType, LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
2282
// Instantiates mask-mask logical pseudos ("_MM_<lmul>"), one per LMUL, with
// VMALU scheduling info.
multiclass VPseudoVALU_MM<bit Commutable = 0> {
  foreach m = MxList in {
    defvar mx = m.MX;
    let VLMul = m.value, isCommutable = Commutable in {
      def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
                        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mx>;
    }
  }
}
2292
// We use earlyclobber here because the spec's overlap rules only permit a
// destination register group to overlap a source when:
// * The destination EEW is smaller than the source EEW and the overlap is
//   in the lowest-numbered part of the source register group.  Any other
//   overlap is illegal.
// * The destination EEW is greater than the source EEW, the source EMUL is
//   at least 1, and the overlap is in the highest-numbered part of the
//   destination register group.  Any other overlap is illegal.
// Widening vector-vector binary pseudos: destination uses the 2*SEW register
// class (wvrclass) and needs earlyclobber per the overlap rules above.
multiclass VPseudoBinaryW_VV<LMULInfo m, bit Commutable = 0> {
  defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
                           "@earlyclobber $rd", TargetConstraintType=3,
                           Commutable=Commutable>;
}
2305
// Widening vector-vector binary pseudos with an FRM rounding-mode operand.
multiclass VPseudoBinaryW_VV_RM<LMULInfo m, int sew> {
  defm _VV : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
                                      "@earlyclobber $rd", sew, UsesVXRM=0,
                                      TargetConstraintType=3>;
}
2311
// Widening vector-scalar (GPR) binary pseudos.
multiclass VPseudoBinaryW_VX<LMULInfo m> {
  defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
                             "@earlyclobber $rd", TargetConstraintType=3>;
}
2316
// Widening vector-immediate binary pseudos.
multiclass VPseudoBinaryW_VI<Operand ImmType, LMULInfo m> {
  defm "_VI" : VPseudoBinary<m.wvrclass, m.vrclass, ImmType, m,
                             "@earlyclobber $rd", TargetConstraintType=3>;
}
2321
// Widening vector-FP-scalar binary pseudos with an FRM rounding-mode
// operand.
multiclass VPseudoBinaryW_VF_RM<LMULInfo m, FPR_Info f, int sew> {
  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass,
                                               f.fprclass, m,
                                               "@earlyclobber $rd", sew,
                                               UsesVXRM=0,
                                               TargetConstraintType=3>;
}
2329
// Widening "WV" binary pseudos (wide first source): both the ordinary form
// and a "_TIED" form whose wide source is tied to the destination.
multiclass VPseudoBinaryW_WV<LMULInfo m> {
  defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                           "@earlyclobber $rd", TargetConstraintType=3>;
  defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
                               "@earlyclobber $rd", TargetConstraintType=3>;
}
2336
// Widening "WV" binary pseudos with an FRM rounding-mode operand, plus the
// "_TIED" variant.
multiclass VPseudoBinaryW_WV_RM<LMULInfo m, int sew> {
  defm _WV : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass, m.vrclass, m,
                                       "@earlyclobber $rd", sew, UsesVXRM = 0,
                                       TargetConstraintType = 3>;
  defm _WV : VPseudoTiedBinaryRoundingMode<m.wvrclass, m.vrclass, m,
                                           "@earlyclobber $rd", sew,
                                           TargetConstraintType = 3>;
}
2345
2346multiclass VPseudoBinaryW_WX<LMULInfo m> {
2347  defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m, /*Constraint*/ "", TargetConstraintType=3>;
2348}
2349
2350multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f, int sew> {
2351  defm "_W" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass,
2352                                               f.fprclass, m,
2353                                               Constraint="",
2354                                               sew=sew,
2355                                               UsesVXRM=0,
2356                                               TargetConstraintType=3>;
2357}
2358
// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
// if the source and destination have an LMUL<=1. This matches this overlap
// exception from the spec.
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group."

// Narrowing vector-vector op: wide first source (m.wvrclass), narrow
// destination/second source. @earlyclobber only when m.octuple >= 8, i.e.
// the fractional-LMUL exception above does not apply.
multiclass VPseudoBinaryV_WV<LMULInfo m> {
  defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
                           TargetConstraintType=2>;
}

// Rounding-mode variant of the narrowing vector-vector op.
multiclass VPseudoBinaryV_WV_RM<LMULInfo m> {
  defm _WV : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, m.vrclass, m,
                                       !if(!ge(m.octuple, 8),
                                       "@earlyclobber $rd", ""),
                                       TargetConstraintType=2>;
}

// Narrowing op with a GPR scalar second operand.
multiclass VPseudoBinaryV_WX<LMULInfo m> {
  defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
                           TargetConstraintType=2>;
}

// Rounding-mode variant of the narrowing GPR-scalar op.
multiclass VPseudoBinaryV_WX_RM<LMULInfo m> {
  defm _WX : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, GPR, m,
                                       !if(!ge(m.octuple, 8),
                                       "@earlyclobber $rd", ""),
                                       TargetConstraintType=2>;
}

// Narrowing op with an unsigned 5-bit immediate second operand.
multiclass VPseudoBinaryV_WI<LMULInfo m> {
  defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
                           TargetConstraintType=2>;
}

// Rounding-mode variant of the narrowing immediate op.
multiclass VPseudoBinaryV_WI_RM<LMULInfo m> {
  defm _WI : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, uimm5, m,
                                       !if(!ge(m.octuple, 8),
                                       "@earlyclobber $rd", ""),
                                       TargetConstraintType=2>;
}
2402
// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0

// Carry-style vector-vector binary pseudo. Destination register class:
//   CarryOut             -> VR (mask result, e.g. vmadc);
//   CarryIn && !CarryOut -> GetVRegNoV0 (dest may not be v0, see above);
//   otherwise            -> m.vrclass.
// An "M" is appended to the name when a carry-in (mask) operand is present.
multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = "",
                             bit Commutable = 0,
                             int TargetConstraintType = 1> {
  let isCommutable = Commutable in
  def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarry<!if(CarryOut, VR,
                       !if(!and(CarryIn, !not(CarryOut)),
                           GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                       m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>;
}

// Tied (dest == merge) vector-vector carry-in pseudo; dest may not be v0.
multiclass VPseudoTiedBinaryV_VM<LMULInfo m, bit Commutable = 0> {
  let isCommutable = Commutable in
  def "_VVM" # "_" # m.MX:
    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, m.vrclass, m>;
}

// GPR-scalar counterpart of VPseudoBinaryV_VM (same dest-class selection).
multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = "", int TargetConstraintType = 1> {
  def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarry<!if(CarryOut, VR,
                       !if(!and(CarryIn, !not(CarryOut)),
                           GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                       m.vrclass, GPR, m, CarryIn, Constraint, TargetConstraintType>;
}

// Tied GPR-scalar carry-in pseudo.
multiclass VPseudoTiedBinaryV_XM<LMULInfo m> {
  def "_VXM" # "_" # m.MX:
    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, GPR, m>;
}

// vfmerge.vfm pseudos for every FP register class / LMUL pairing; always
// masked (forceMasked=1) since the mask is the merge selector.
multiclass VPseudoVMRG_FM {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      def "_V" # f.FX # "M_" # mx
          : VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R, m.vrclass,
                                     f.fprclass, m>,
          SchedBinary<"WriteVFMergeV", "ReadVFMergeV", "ReadVFMergeF", mx,
                      forceMasked=1, forceMergeOpRead=true>;
    }
  }
}

// Signed-immediate counterpart of VPseudoBinaryV_VM.
multiclass VPseudoBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = "", int TargetConstraintType = 1> {
  def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
    VPseudoBinaryCarry<!if(CarryOut, VR,
                       !if(!and(CarryIn, !not(CarryOut)),
                           GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                       m.vrclass, simm5, m, CarryIn, Constraint, TargetConstraintType>;
}

// Tied signed-immediate carry-in pseudo.
multiclass VPseudoTiedBinaryV_IM<LMULInfo m> {
  def "_VIM" # "_" # m.MX:
    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, simm5, m>;
}
2467
// Pseudos for vmv.v.v / vmv.v.x / vmv.v.i across all LMULs, with scheduling
// information attached per source-operand kind.
// Fix: the original nested two identical "let VLMul = m.value" scopes (one
// directly inside the other); a single scope produces the same records.
multiclass VPseudoUnaryVMV_V_X_I {
  foreach m = MxList in {
    defvar mx = m.MX;
    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                       SchedUnary<"WriteVIMovV", "ReadVIMovV", mx,
                                  forceMergeOpRead=true>;
      def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
                       SchedUnary<"WriteVIMovX", "ReadVIMovX", mx,
                                  forceMergeOpRead=true>;
      def "_I_" # mx : VPseudoUnaryNoMask<m.vrclass, simm5>,
                       SchedNullary<"WriteVIMovI", mx,
                                    forceMergeOpRead=true>;
    }
  }
}
2486
// vfmv.v.f pseudos: one def per (FP register class, LMUL) pairing; the name
// suffix encodes f.FX and the LMUL string.
multiclass VPseudoVMV_F {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      let VLMul = m.value in {
        def "_" # f.FX # "_" # mx :
          VPseudoUnaryNoMask<m.vrclass, f.fprclass>,
          SchedUnary<"WriteVFMovV", "ReadVFMovF", mx, forceMergeOpRead=true>;
      }
    }
  }
}

// vfclass.v pseudos: unmasked and masked (_MASK) forms per FP LMUL; the
// masked form is tagged RISCVMaskedPseudo with the mask at operand index 2.
multiclass VPseudoVCLS_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
                       SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
                                  forceMergeOpRead=true>;
      def "_V_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                                 RISCVMaskedPseudo<MaskIdx=2>,
                                 SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
                                            forceMergeOpRead=true>;
    }
  }
}
2514
// vfsqrt.v pseudos: one unmasked + one masked def per (LMUL, SEW) pair, each
// carrying a rounding-mode operand; the SEW is baked into both the pseudo
// name ("_E" # e) and the SEW field.
multiclass VPseudoVSQR_V_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar sews = SchedSEWSet<m.MX, isF=1>.val;

    let VLMul = m.value in
      foreach e = sews in {
        defvar suffix = "_" # mx # "_E" # e;
        let SEW = e in {
          def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
                              SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
                                         forceMergeOpRead=true>;
          def "_V" #suffix # "_MASK"
              : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
                RISCVMaskedPseudo<MaskIdx = 2>,
                SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
                           forceMergeOpRead=true>;
        }
      }
  }
}

// Reciprocal-style unary FP pseudos (no rounding-mode operand): unmasked and
// masked defs per (LMUL, SEW).
multiclass VPseudoVRCP_V {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      let VLMul = m.value in {
        def "_V_" # mx # "_E" # e
            : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
        def "_V_" # mx # "_E" # e # "_MASK"
            : VPseudoUnaryMask<m.vrclass, m.vrclass>,
              RISCVMaskedPseudo<MaskIdx = 2>,
              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
      }
    }
  }
}

// Same shape as VPseudoVRCP_V but with a rounding-mode operand.
multiclass VPseudoVRCP_V_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      let VLMul = m.value in {
        def "_V_" # mx # "_E" # e
            : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
        def "_V_" # mx # "_E" # e # "_MASK"
            : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
              RISCVMaskedPseudo<MaskIdx = 2>,
              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
      }
    }
  }
}
2570
// Integer extension pseudos (vzext/vsext .vf2): source register class is the
// half-width f2vrclass; dest is always @earlyclobber since dest EEW > source
// EEW. CurrTypeConstraints is 1 for the smaller LMULs listed, 3 otherwise.
multiclass PseudoVEXT_VF2 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF2 in {
    defvar mx = m.MX;
    defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF4"), !eq(mx, "MF2"), !eq(mx, "M1")), 1, 3);
    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
        RISCVMaskedPseudo<MaskIdx=2>,
        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
    }
  }
}

// .vf4 extension: quarter-width source (f4vrclass); the LMUL set for which
// CurrTypeConstraints is 1 shifts up accordingly (MF2/M1/M2).
multiclass PseudoVEXT_VF4 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF2"), !eq(mx, "M1"), !eq(mx, "M2")), 1, 3);
    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
        RISCVMaskedPseudo<MaskIdx=2>,
        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
    }
  }
}

// .vf8 extension: eighth-width source (f8vrclass); constraint-1 set is
// M1/M2/M4.
multiclass PseudoVEXT_VF8 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxListVF8 in {
    defvar mx = m.MX;
    defvar CurrTypeConstraints = !if(!or(!eq(mx, "M1"), !eq(mx, "M2"), !eq(mx, "M4")), 1, 3);
    let VLMul = m.value in {
      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
      def "_" # mx # "_MASK" :
        VPseudoUnaryMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
        RISCVMaskedPseudo<MaskIdx=2>,
        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
    }
  }
}
2618
// The destination EEW is 1 since "For the purposes of register group overlap
// constraints, mask elements have EEW=1."
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW is different from source EEW, we need to use
// @earlyclobber to avoid the overlap between destination and source registers.
// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
// exception from the spec
// "The destination EEW is smaller than the source EEW and the overlap is in the
//  lowest-numbered part of the source register group".
// With LMUL<=1 the source and dest occupy a single register so any overlap
// is in the lowest-numbered part.

// Mask-producing vector-vector compare: destination class is VR (a mask);
// @earlyclobber only when m.octuple > 8, i.e. LMUL > 1 (see above).
multiclass VPseudoBinaryM_VV<LMULInfo m, int TargetConstraintType = 1,
                             bit Commutable = 0> {
  defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""),
                            TargetConstraintType, Commutable=Commutable>;
}

// Mask-producing compare against a GPR scalar.
multiclass VPseudoBinaryM_VX<LMULInfo m, int TargetConstraintType = 1> {
  defm "_VX" :
    VPseudoBinaryM<VR, m.vrclass, GPR, m,
                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
}

// Mask-producing compare against an FP scalar.
multiclass VPseudoBinaryM_VF<LMULInfo m, FPR_Info f, int TargetConstraintType = 1> {
  defm "_V" # f.FX :
    VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
}

// Mask-producing compare against a signed 5-bit immediate.
multiclass VPseudoBinaryM_VI<LMULInfo m, int TargetConstraintType = 1> {
  defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
}
2653
// vrgather pseudos (_VV/_VX/_VI): always @earlyclobber since the destination
// may not overlap the index or data sources. The _VV form is additionally
// instantiated per SEW for scheduling.
multiclass VPseudoVGTR_VV_VX_VI {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VX<m, constraint>,
              SchedBinary<"WriteVRGatherVX", "ReadVRGatherVX_data",
                          "ReadVRGatherVX_index", mx, forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VI<uimm5, m, constraint>,
              SchedUnary<"WriteVRGatherVI", "ReadVRGatherVI_data", mx,
                         forceMergeOpRead=true>;

    defvar sews = SchedSEWSet<mx>.val;
    foreach e = sews in {
      defm "" : VPseudoBinaryV_VV<m, constraint, e>,
                SchedBinary<"WriteVRGatherVV", "ReadVRGatherVV_data",
                              "ReadVRGatherVV_index", mx, e, forceMergeOpRead=true>;
    }
  }
}

// Saturating ALU pseudos with _VV/_VX and a signed-immediate _VI form.
multiclass VPseudoVSALU_VV_VX_VI<bit Commutable = 0> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
              SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VI<simm5, m>,
              SchedUnary<"WriteVSALUI", "ReadVSALUV", mx, forceMergeOpRead=true>;
  }
}


// Shift pseudos with _VV/_VX and an unsigned-immediate _VI form.
multiclass VPseudoVSHT_VV_VX_VI {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m>,
              SchedBinary<"WriteVShiftV", "ReadVShiftV", "ReadVShiftV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVShiftX", "ReadVShiftV", "ReadVShiftX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VI<uimm5, m>,
              SchedUnary<"WriteVShiftI", "ReadVShiftV", mx, forceMergeOpRead=true>;
  }
}
2702
// Scaling-shift pseudos (_VV/_VX/_VI), all with a rounding-mode operand.
multiclass VPseudoVSSHT_VV_VX_VI_RM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV_RM<m>,
              SchedBinary<"WriteVSShiftV", "ReadVSShiftV", "ReadVSShiftV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX_RM<m>,
              SchedBinary<"WriteVSShiftX", "ReadVSShiftV", "ReadVSShiftX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VI_RM<uimm5, m>,
              SchedUnary<"WriteVSShiftI", "ReadVSShiftV", mx, forceMergeOpRead=true>;
  }
}

// Integer ALU pseudos with _VV/_VX and a signed-immediate _VI form.
multiclass VPseudoVALU_VV_VX_VI<bit Commutable = 0> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
            SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", mx,
                        forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
            SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
                        forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VI<simm5, m>,
            SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
  }
}

// Saturating ALU pseudos with only _VV/_VX forms (no immediate).
multiclass VPseudoVSALU_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m>,
              SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
                          forceMergeOpRead=true>;
  }
}

// vsmul pseudos (_VV/_VX) with rounding mode; the _VV form is commutable.
multiclass VPseudoVSMUL_VV_VX_RM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV_RM<m, Commutable=1>,
              SchedBinary<"WriteVSMulV", "ReadVSMulV", "ReadVSMulV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX_RM<m>,
              SchedBinary<"WriteVSMulX", "ReadVSMulV", "ReadVSMulX", mx,
                          forceMergeOpRead=true>;
  }
}

// Averaging ALU pseudos (_VV/_VX) with rounding mode.
multiclass VPseudoVAALU_VV_VX_RM<bit Commutable = 0> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV_RM<m, Commutable=Commutable>,
              SchedBinary<"WriteVAALUV", "ReadVAALUV", "ReadVAALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX_RM<m>,
              SchedBinary<"WriteVAALUX", "ReadVAALUV", "ReadVAALUX", mx,
                          forceMergeOpRead=true>;
  }
}
2766
// Integer min/max pseudos (_VV commutable, _VX).
multiclass VPseudoVMINMAX_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m, Commutable=1>,
              SchedBinary<"WriteVIMinMaxV", "ReadVIMinMaxV", "ReadVIMinMaxV", mx>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVIMinMaxX", "ReadVIMinMaxV", "ReadVIMinMaxX", mx>;
  }
}

// Integer multiply pseudos (_VV/_VX).
multiclass VPseudoVMUL_VV_VX<bit Commutable = 0> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
              SchedBinary<"WriteVIMulV", "ReadVIMulV", "ReadVIMulV", mx>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVIMulX", "ReadVIMulV", "ReadVIMulX", mx>;
  }
}

// Integer divide/remainder pseudos; instantiated per SEW because divide
// latency is SEW-dependent in the scheduling model.
multiclass VPseudoVDIV_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar sews = SchedSEWSet<mx>.val;
    foreach e = sews in {
      defm "" : VPseudoBinaryV_VV<m, "", e>,
                SchedBinary<"WriteVIDivV", "ReadVIDivV", "ReadVIDivV", mx, e>;
      defm "" : VPseudoBinaryV_VX<m, "", e>,
                SchedBinary<"WriteVIDivX", "ReadVIDivV", "ReadVIDivX", mx, e>;
    }
  }
}
2799
// FP multiply pseudos: _VV per (LMUL, SEW), plus _VF per FP register class
// at its fixed f.SEW; all carry a rounding-mode operand.
multiclass VPseudoVFMUL_VV_VF_RM {
  foreach m = MxListF in {
    foreach e = SchedSEWSet<m.MX, isF=1>.val in
      defm "" : VPseudoBinaryFV_VV_RM<m, e>,
                SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX, e,
                            forceMergeOpRead=true>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
                SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX,
                            f.SEW, forceMergeOpRead=true>;
    }
  }
}

// FP divide pseudos (_VV/_VF) with rounding mode; same per-SEW structure as
// VPseudoVFMUL_VV_VF_RM.
multiclass VPseudoVFDIV_VV_VF_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    defvar sews = SchedSEWSet<mx, isF=1>.val;
    foreach e = sews in {
      defm "" : VPseudoBinaryFV_VV_RM<m, e>,
                SchedBinary<"WriteVFDivV", "ReadVFDivV", "ReadVFDivV", mx, e,
                            forceMergeOpRead=true>;
    }
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
                SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
                            forceMergeOpRead=true>;
    }
  }
}

// Reverse FP divide (vfrdiv): scalar-only _VF form with rounding mode.
multiclass VPseudoVFRDIV_VF_RM {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
                SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
                            forceMergeOpRead=true>;
    }
  }
}
2846
// Integer ALU pseudos with only _VV/_VX forms (no immediate).
// Fix: the foreach line was indented one space instead of the file-wide two;
// whitespace-only change, identical records are generated.
multiclass VPseudoVALU_VV_VX {
  foreach m = MxList in {
    defm "" : VPseudoBinaryV_VV<m>,
            SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
                        forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
            SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
                        forceMergeOpRead=true>;
  }
}
2857
// FP sign-injection pseudos: _VV per (LMUL, SEW) and _VF per FP register
// class; no rounding-mode operand.
multiclass VPseudoVSGNJ_VV_VF {
  foreach m = MxListF in {
    foreach e = SchedSEWSet<m.MX, isF=1>.val in
    defm "" : VPseudoBinaryV_VV<m, sew=e>,
              SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX,
                          e, forceMergeOpRead=true>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryV_VF<m, f, sew=f.SEW>,
                SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX,
                            f.SEW, forceMergeOpRead=true>;
    }
  }
}

// FP min/max pseudos: same _VV/_VF structure as VPseudoVSGNJ_VV_VF.
multiclass VPseudoVMAX_VV_VF {
  foreach m = MxListF in {
    foreach e = SchedSEWSet<m.MX, isF=1>.val in
      defm "" : VPseudoBinaryV_VV<m, sew=e>,
                SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV",
                            m.MX, e, forceMergeOpRead=true>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryV_VF<m, f, sew=f.SEW>,
                SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF",
                            m.MX, f.SEW, forceMergeOpRead=true>;
    }
  }
}

// FP ALU pseudos (_VV/_VF) with a rounding-mode operand.
multiclass VPseudoVALU_VV_VF_RM {
  foreach m = MxListF in {
    foreach e = SchedSEWSet<m.MX, isF=1>.val in
      defm "" : VPseudoBinaryFV_VV_RM<m, e>,
                SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX, e,
                            forceMergeOpRead=true>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
                            f.SEW, forceMergeOpRead=true>;
    }
  }
}

// FP ALU pseudos with only the scalar _VF form (e.g. reverse-operand ops).
multiclass VPseudoVALU_VF_RM {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
                            f.SEW, forceMergeOpRead=true>;
    }
  }
}
2918
// Integer ALU pseudos with only _VX and signed-immediate _VI forms (no _VV,
// e.g. vrsub).
multiclass VPseudoVALU_VX_VI {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VI<simm5, m>,
              SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
  }
}

// Widening integer ALU pseudos (_VV/_VX), iterating only the LMULs that can
// widen (MxListW).
multiclass VPseudoVWALU_VV_VX<bit Commutable = 0> {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
              SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryW_VX<m>,
              SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
                          forceMergeOpRead=true>;
  }
}

// Widening integer multiply pseudos (_VV/_VX) over MxListW.
multiclass VPseudoVWMUL_VV_VX<bit Commutable = 0> {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
              SchedBinary<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryW_VX<m>,
              SchedBinary<"WriteVIWMulX", "ReadVIWMulV", "ReadVIWMulX", mx,
                          forceMergeOpRead=true>;
  }
}
2953
// Widening FP multiply pseudos (_VV/_VF) with rounding mode, over the
// FP-widening LMUL/SEW lists (MxListFW / FPListW).
multiclass VPseudoVWMUL_VV_VF_RM {
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
    defm "" : VPseudoBinaryW_VV_RM<m, sew=e>,
              SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX,
                          e, forceMergeOpRead=true>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
                SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX,
                          f.SEW, forceMergeOpRead=true>;
    }
  }
}

// Widening integer ALU pseudos whose first source is already wide (_WV/_WX).
multiclass VPseudoVWALU_WV_WX {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryW_WV<m>,
              SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryW_WX<m>,
              SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
                          forceMergeOpRead=true>;
  }
}

// Widening FP ALU pseudos with narrow sources (_VV/_VF), rounding mode.
multiclass VPseudoVFWALU_VV_VF_RM {
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm "" : VPseudoBinaryW_VV_RM<m, sew=e>,
                SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
                            e, forceMergeOpRead=true>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
                SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
                          f.SEW, forceMergeOpRead=true>;
    }
  }
}

// Widening FP ALU pseudos with a wide first source (_WV/_WF), rounding mode.
multiclass VPseudoVFWALU_WV_WF_RM {
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm "" : VPseudoBinaryW_WV_RM<m, sew=e>,
                SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
                            e, forceMergeOpRead=true>;
  }
  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defm "" : VPseudoBinaryW_WF_RM<m, f, sew=f.SEW>,
                SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
                            f.SEW, forceMergeOpRead=true>;
    }
  }
}
3015
// vmerge pseudos (_VVM/_VXM/_VIM): tied carry-in form whose destination may
// not be v0 (GetVRegNoV0), since v0 holds the selector mask operand.
multiclass VPseudoVMRG_VM_XM_IM {
  foreach m = MxList in {
    defvar mx = m.MX;
    def "_VVM" # "_" # m.MX:
      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                               m.vrclass, m.vrclass, m>,
      SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
                          forceMergeOpRead=true>;
    def "_VXM" # "_" # m.MX:
      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                               m.vrclass, GPR, m>,
      SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
                          forceMergeOpRead=true>;
    def "_VIM" # "_" # m.MX:
      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                               m.vrclass, simm5, m>,
      SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
                          forceMergeOpRead=true>;
  }
}
3036
// Carry-in, vector-result ops (vadc-style): tied _VM/_XM/_IM forms.
multiclass VPseudoVCALU_VM_XM_IM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoTiedBinaryV_VM<m, Commutable=1>,
              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoTiedBinaryV_XM<m>,
              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoTiedBinaryV_IM<m>,
              SchedUnary<"WriteVICALUI", "ReadVICALUV", mx,
                          forceMergeOpRead=true>;
  }
}

// Same as above without the immediate form (vsbc-style: no _IM).
multiclass VPseudoVCALU_VM_XM {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoTiedBinaryV_VM<m>,
              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoTiedBinaryV_XM<m>,
              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
                          forceMergeOpRead=true>;
  }
}
3063
// Mask-result carry ops (vmadc/vmsbc-style): CarryOut=1 so the destination
// is a mask (VR); @earlyclobber since mask dest EEW (1) differs from source
// EEW. forceMasked=1 on the scheduling classes because the carry-in mask
// operand is always present in these forms.

// With carry-in: _VM/_XM/_IM (vmadc.v?m / vmsbc.v?m).
multiclass VPseudoVCALUM_VM_XM_IM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
                                Commutable=1, TargetConstraintType=2>,
              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=constraint, TargetConstraintType=2>,
              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=1, Constraint=constraint, TargetConstraintType=2>,
              SchedUnary<"WriteVICALUI", "ReadVICALUV", mx, forceMasked=1,
                          forceMergeOpRead=true>;
  }
}

// With carry-in, no immediate form.
multiclass VPseudoVCALUM_VM_XM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
                                TargetConstraintType=2>,
              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
                                TargetConstraintType=2>,
              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
                          forceMergeOpRead=true>;
  }
}

// Without carry-in (CarryIn=0): mask result only, _V/_X/_I forms; scheduling
// is not forced masked here since there is no mask operand.
multiclass VPseudoVCALUM_V_X_I {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=constraint,
                                Commutable=1, TargetConstraintType=2>,
              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=0, Constraint=constraint>,
              SchedUnary<"WriteVICALUI", "ReadVICALUV", mx,
                          forceMergeOpRead=true>;
  }
}

// Without carry-in, no immediate form.
multiclass VPseudoVCALUM_V_X {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
                          forceMergeOpRead=true>;
  }
}
3125
// Narrowing clip pseudos in WV/WX/WI forms.  These iterate MxListW (the LMULs
// for which a widened source register group exists) and use the rounding-mode
// (_RM) binary helpers, which carry an explicit vxrm operand.
multiclass VPseudoVNCLP_WV_WX_WI_RM {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_WV_RM<m>,
              SchedBinary<"WriteVNClipV", "ReadVNClipV", "ReadVNClipV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_WX_RM<m>,
              SchedBinary<"WriteVNClipX", "ReadVNClipV", "ReadVNClipX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_WI_RM<m>,
              SchedUnary<"WriteVNClipI", "ReadVNClipV", mx,
                          forceMergeOpRead=true>;
  }
}

// Narrowing shift pseudos in WV/WX/WI forms (no rounding-mode operand).
multiclass VPseudoVNSHT_WV_WX_WI {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_WV<m>,
              SchedBinary<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_WX<m>,
              SchedBinary<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_WI<m>,
              SchedUnary<"WriteVNShiftI", "ReadVNShiftV", mx,
                          forceMergeOpRead=true>;
  }
}
3155
// Ternary pseudo with tail-policy only.  Records are suffixed with both LMUL
// and SEW ("_MX_Esew"), and the masked variant is tagged with
// ActiveAffectsRes=true (the set of active lanes affects the result value,
// not just which lanes are written).
multiclass VPseudoTernaryWithTailPolicy<VReg RetClass,
                                          RegisterClass Op1Class,
                                          DAGOperand Op2Class,
                                          LMULInfo MInfo,
                                          int sew> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar mx = MInfo.MX;
    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class>;
    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class, Op2Class>,
                                          RISCVMaskedPseudo<MaskIdx=3, ActiveAffectsRes=true>;
  }
}

// Same as VPseudoTernaryWithTailPolicy, but the underlying pseudos carry a
// rounding-mode operand.
multiclass VPseudoTernaryWithTailPolicyRoundingMode<VReg RetClass,
                                                    RegisterClass Op1Class,
                                                    DAGOperand Op2Class,
                                                    LMULInfo MInfo,
                                                    int sew> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar mx = MInfo.MX;
    def "_" # mx # "_E" # sew
        : VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
                                                     Op2Class>;
    def "_" # mx # "_E" # sew # "_MASK"
        : VPseudoTernaryMaskPolicyRoundingMode<RetClass, Op1Class,
                                               Op2Class>,
          RISCVMaskedPseudo<MaskIdx=3, ActiveAffectsRes=true>;
  }
}

// Ternary pseudo with full policy support.  Suffix is LMUL only; the mask
// operand of the masked variant is at operand index 3.
multiclass VPseudoTernaryWithPolicy<VReg RetClass,
                                    RegisterClass Op1Class,
                                    DAGOperand Op2Class,
                                    LMULInfo MInfo,
                                    string Constraint = "",
                                    bit Commutable = 0,
                                    int TargetConstraintType = 1> {
  let VLMul = MInfo.value in {
    let isCommutable = Commutable in
    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint, TargetConstraintType>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint, TargetConstraintType>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}

// Rounding-mode variant of VPseudoTernaryWithPolicy.  When sew is nonzero the
// record names also carry an "_Esew" suffix.  The masked pseudo is built with
// UsesVXRM_=0 (rounding mode here is presumably FRM rather than VXRM —
// NOTE(review): confirm against VPseudoBinaryMaskPolicyRoundingMode).
multiclass VPseudoTernaryWithPolicyRoundingMode<VReg RetClass,
                                                RegisterClass Op1Class,
                                                DAGOperand Op2Class,
                                                LMULInfo MInfo,
                                                string Constraint = "",
                                                int sew = 0,
                                                bit Commutable = 0,
                                                int TargetConstraintType = 1> {
  let VLMul = MInfo.value in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    let isCommutable = Commutable in
    def suffix :
        VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
                                                   Op2Class, Constraint,
                                                   TargetConstraintType>;
    def suffix # "_MASK" :
        VPseudoBinaryMaskPolicyRoundingMode<RetClass, Op1Class,
                                            Op2Class, Constraint,
                                            UsesVXRM_=0,
                                            TargetConstraintType=TargetConstraintType>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}
3224
// Single-width multiply-add style ternary: vd = op(vd, vs1/rs1/fs1, vs2).
// "_AAXA" variants are commutable in their two multiplicand operands.
multiclass VPseudoTernaryV_VV_AAXA<LMULInfo m> {
  defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
                                      Commutable=1>;
}

// Rounding-mode variant of the VV form, parameterized by SEW.
multiclass VPseudoTernaryV_VV_AAXA_RM<LMULInfo m, int sew> {
  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
                                                  sew=sew, Commutable=1>;
}

// Vector-scalar (GPR operand) form.
multiclass VPseudoTernaryV_VX_AAXA<LMULInfo m> {
  defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
                                        Commutable=1>;
}

// Vector-FP-scalar form; the record suffix encodes the FP register class
// (f.FX) and the pseudos carry a rounding-mode operand.
multiclass VPseudoTernaryV_VF_AAXA_RM<LMULInfo m, FPR_Info f,
                                      int sew> {
  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, f.fprclass,
                                                          m.vrclass, m,
                                                          sew=sew, Commutable=1>;
}

// Widening ternary forms: the destination uses the widened register class
// (m.wvrclass), so $rd is early-clobbered to prevent overlap with sources.
multiclass VPseudoTernaryW_VV<LMULInfo m, bit Commutable = 0> {
  defvar constraint = "@earlyclobber $rd";
  defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
                                      constraint, Commutable=Commutable, TargetConstraintType=3>;
}

multiclass VPseudoTernaryW_VV_RM<LMULInfo m, int sew> {
  defvar constraint = "@earlyclobber $rd";
  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
                                                  constraint, sew,
                                                  TargetConstraintType=3>;
}

multiclass VPseudoTernaryW_VX<LMULInfo m> {
  defvar constraint = "@earlyclobber $rd";
  defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
                                        constraint, TargetConstraintType=3>;
}

multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f, int sew> {
  defvar constraint = "@earlyclobber $rd";
  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, f.fprclass,
                                                          m.vrclass, m, constraint,
                                                          sew=sew,
                                                          TargetConstraintType=3>;
}
3273
// Slide pseudos with policy support; like VPseudoTernaryWithPolicy but never
// commutable (slides read vd as a merge/ternary operand).
multiclass VPseudoVSLDVWithPolicy<VReg RetClass,
                                  RegisterClass Op1Class,
                                  DAGOperand Op2Class,
                                  LMULInfo MInfo,
                                  string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
                                   RISCVMaskedPseudo<MaskIdx=3>;
  }
}

// Slide with a GPR offset operand.
multiclass VPseudoVSLDV_VX<LMULInfo m, string Constraint = ""> {
  defm _VX : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

// Slide with a 5-bit unsigned immediate offset operand.
multiclass VPseudoVSLDV_VI<LMULInfo m, string Constraint = ""> {
  defm _VI : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, uimm5, m, Constraint>;
}
3293
// Integer multiply-add pseudos in VV and VX forms, over all LMULs.
multiclass VPseudoVMAC_VV_VX_AAXA {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoTernaryV_VV_AAXA<m>,
              SchedTernary<"WriteVIMulAddV", "ReadVIMulAddV", "ReadVIMulAddV",
                           "ReadVIMulAddV", mx>;
    defm "" : VPseudoTernaryV_VX_AAXA<m>,
              SchedTernary<"WriteVIMulAddX", "ReadVIMulAddV", "ReadVIMulAddX",
                           "ReadVIMulAddV", mx>;
  }
}

// FP multiply-add pseudos: VV forms per (LMUL, SEW) pair from SchedSEWSet,
// plus VF forms for each FP register class in FPList at its fixed SEW.
multiclass VPseudoVMAC_VV_VF_AAXA_RM {
  foreach m = MxListF in {
    foreach e = SchedSEWSet<m.MX, isF=1>.val in
      defm "" : VPseudoTernaryV_VV_AAXA_RM<m, sew=e>,
                SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
                             "ReadVFMulAddV", m.MX, e>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoTernaryV_VF_AAXA_RM<m, f, sew=f.SEW>,
                SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
                             "ReadVFMulAddV", m.MX, f.SEW>;
    }
  }
}

// Slide-up/slide-down pseudos in VX and VI forms; slidesUp only selects which
// scheduling write resource the VX form uses.
multiclass VPseudoVSLD_VX_VI<bit slidesUp = false, string Constraint = ""> {
  defvar WriteSlideX = !if(slidesUp, "WriteVSlideUpX", "WriteVSlideDownX");
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoVSLDV_VX<m, Constraint>,
              SchedTernary<WriteSlideX, "ReadVISlideV", "ReadVISlideV",
                           "ReadVISlideX", mx>;
    defm "" : VPseudoVSLDV_VI<m, Constraint>,
              SchedBinary<"WriteVSlideI", "ReadVISlideV", "ReadVISlideV", mx>;
  }
}
3334
// Widening integer multiply-add pseudos in VV and VX forms.
multiclass VPseudoVWMAC_VV_VX<bit Commutable = 0> {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoTernaryW_VV<m, Commutable=Commutable>,
              SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV",
                           "ReadVIWMulAddV", mx>;
    defm "" : VPseudoTernaryW_VX<m>,
              SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX",
                           "ReadVIWMulAddV", mx>;
  }
}

// Widening integer multiply-add, VX form only.
multiclass VPseudoVWMAC_VX {
  foreach m = MxListW in {
    defm "" : VPseudoTernaryW_VX<m>,
              SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX",
                           "ReadVIWMulAddV", m.MX>;
  }
}

// Widening FP multiply-add: VV forms per (LMUL, SEW), plus VF forms per FP
// register class in FPListW.
multiclass VPseudoVWMAC_VV_VF_RM {
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm "" : VPseudoTernaryW_VV_RM<m, sew=e>,
                SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
                             "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX, e>;
  }

  foreach f = FPListW in {
    foreach m = f.MxListFW in {
      defm "" : VPseudoTernaryW_VF_RM<m, f, sew=f.SEW>,
                SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
                             "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX, f.SEW>;
    }
  }
}

// As above, but the VF forms iterate BFPListW (bfloat16 register classes).
multiclass VPseudoVWMAC_VV_VF_BF_RM {
  foreach m = MxListFW in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in
      defm "" : VPseudoTernaryW_VV_RM<m, sew=e>,
                SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
                             "ReadVFWMulAddV", "ReadVFWMulAddV", mx, e>;
  }

  foreach f = BFPListW in {
    foreach m = f.MxListFW in {
      defvar mx = m.MX;
      defm "" : VPseudoTernaryW_VF_RM<m, f, sew=f.SEW>,
                SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
                             "ReadVFWMulAddF", "ReadVFWMulAddV", mx, f.SEW>;
    }
  }
}
3390
// Mask-producing integer compare pseudos in VV, VX and VI forms; only the VV
// form can be commutable (for symmetric predicates).
multiclass VPseudoVCMPM_VV_VX_VI<bit Commutable = 0> {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2, Commutable=Commutable>,
              SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>;
    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
    defm "" : VPseudoBinaryM_VI<m, TargetConstraintType=2>,
              SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>;
  }
}

// Integer compare, VV and VX forms only (no immediate encoding).
multiclass VPseudoVCMPM_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
              SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>;
    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
  }
}

// FP compare: VV forms over MxListF, plus VF forms per FP register class.
multiclass VPseudoVCMPM_VV_VF {
  foreach m = MxListF in {
    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
              SchedBinary<"WriteVFCmpV", "ReadVFCmpV", "ReadVFCmpV", m.MX>;
  }

  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryM_VF<m, f, TargetConstraintType=2>,
                SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>;
    }
  }
}

// FP compare, scalar (VF) form only.
multiclass VPseudoVCMPM_VF {
  foreach f = FPList in {
    foreach m = f.MxList in {
      defm "" : VPseudoBinaryM_VF<m, f, TargetConstraintType=2>,
                SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>;
    }
  }
}

// Integer compare, VX and VI forms only (no vector-vector encoding).
multiclass VPseudoVCMPM_VX_VI {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
    defm "" : VPseudoBinaryM_VI<m, TargetConstraintType=2>,
              SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>;
  }
}
3445
// Reduction pseudos.  In all of these the destination and the scalar operand
// use V_M1.vrclass (a single register holding the scalar accumulator) while
// the vector source uses the full m.vrclass group; one pseudo is defined per
// (LMUL, SEW) pair from SchedSEWSet.

// Unordered integer reduction.
multiclass VPseudoVRED_VS {
  foreach m = MxList in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx>.val in {
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 SchedReduction<"WriteVIRedV_From", "ReadVIRedV", mx, e>;
    }
  }
}

// Integer min/max reduction (separate scheduling resource).
multiclass VPseudoVREDMINMAX_VS {
  foreach m = MxList in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx>.val in {
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 SchedReduction<"WriteVIRedMinMaxV_From", "ReadVIRedV", mx, e>;
    }
  }
}

// Widening integer reduction (iterates MxListWRed, widening SEW set).
multiclass VPseudoVWRED_VS {
  foreach m = MxListWRed in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isWidening=1>.val in {
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 SchedReduction<"WriteVIWRedV_From", "ReadVIWRedV", mx, e>;
    }
  }
}

// Unordered FP reduction; carries a rounding-mode operand.
multiclass VPseudoVFRED_VS_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      defm _VS
          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
                                                     V_M1.vrclass, m, e>,
            SchedReduction<"WriteVFRedV_From", "ReadVFRedV", mx, e>;
    }
  }
}

// FP min/max reduction (no rounding-mode operand).
multiclass VPseudoVFREDMINMAX_VS {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
                 SchedReduction<"WriteVFRedMinMaxV_From", "ReadVFRedV", mx, e>;
    }
  }
}

// Ordered FP reduction.
multiclass VPseudoVFREDO_VS_RM {
  foreach m = MxListF in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1>.val in {
      defm _VS : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
                                                          V_M1.vrclass, m, e>,
                 SchedReduction<"WriteVFRedOV_From", "ReadVFRedOV", mx, e>;
    }
  }
}

// Unordered widening FP reduction.
multiclass VPseudoVFWRED_VS_RM {
  foreach m = MxListFWRed in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
      defm _VS
          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
                                                     V_M1.vrclass, m, e>,
            SchedReduction<"WriteVFWRedV_From", "ReadVFWRedV", mx, e>;
    }
  }
}

// Ordered widening FP reduction.
multiclass VPseudoVFWREDO_VS_RM {
  foreach m = MxListFWRed in {
    defvar mx = m.MX;
    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
      defm _VS
          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
                                                     V_M1.vrclass, m, e>,
            SchedReduction<"WriteVFWRedOV_From", "ReadVFWRedV", mx, e>;
    }
  }
}
3532
// Unary conversion pseudo (plain and "_MASK" variants).  When sew is nonzero
// the record suffix also encodes it ("_MX_Esew"); the mask operand of the
// masked variant is at operand index 2.
multiclass VPseudoConversion<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = "",
                             int sew = 0,
                             int TargetConstraintType = 1> {
  defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
  let VLMul = MInfo.value, SEW=sew in {
    def suffix : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint, TargetConstraintType>;
    def suffix # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
                                            Constraint, TargetConstraintType>,
                           RISCVMaskedPseudo<MaskIdx=2>;
  }
}

// As VPseudoConversion, but the pseudos carry a dynamic rounding-mode operand.
multiclass VPseudoConversionRoundingMode<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = "",
                             int sew = 0,
                             int TargetConstraintType = 1> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    def suffix : VPseudoUnaryNoMaskRoundingMode<RetClass, Op1Class, Constraint, TargetConstraintType>;
    def suffix # "_MASK" : VPseudoUnaryMaskRoundingMode<RetClass, Op1Class,
                                                        Constraint,
                                                        TargetConstraintType>,
                           RISCVMaskedPseudo<MaskIdx=2>;
  }
}


// As VPseudoConversion, but built on the _FRM unary pseudo classes (static
// FRM rounding-mode immediate).
multiclass VPseudoConversionRM<VReg RetClass,
                               VReg Op1Class,
                               LMULInfo MInfo,
                               string Constraint = "",
                               int sew = 0,
                               int TargetConstraintType = 1> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
    def suffix : VPseudoUnaryNoMask_FRM<RetClass, Op1Class,
                                        Constraint, TargetConstraintType>;
    def suffix # "_MASK" : VPseudoUnaryMask_FRM<RetClass, Op1Class,
                                                Constraint, TargetConstraintType>,
                           RISCVMaskedPseudo<MaskIdx=2>;
  }
}

// Conversion that defines only a masked, no-exception variant (no plain or
// RISCVMaskedPseudo-tagged records).
multiclass VPseudoConversionNoExcept<VReg RetClass,
                                     VReg Op1Class,
                                     LMULInfo MInfo,
                                     string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask_NoExcept<RetClass, Op1Class, Constraint>;
  }
}
3589
// Single-width FP->int conversion (no rounding-mode operand).
multiclass VPseudoVCVTI_V {
  foreach m = MxListF in {
    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

// Single-width FP->int conversion with a dynamic rounding-mode operand.
multiclass VPseudoVCVTI_V_RM {
  foreach m = MxListF in {
    defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m>,
              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

// Single-width FP->int conversion with a static FRM immediate.
multiclass VPseudoVCVTI_RM_V {
  foreach m = MxListF in {
    defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

// Masked-only, no-exception rounding pseudo (see VPseudoConversionNoExcept).
multiclass VPseudoVFROUND_NOEXCEPT_V {
  foreach m = MxListF in {
    defm _V : VPseudoConversionNoExcept<m.vrclass, m.vrclass, m>,
              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

// Single-width int->FP conversion with dynamic rounding mode, per-SEW.
multiclass VPseudoVCVTF_V_RM {
  foreach m = MxListF in {
    foreach e = SchedSEWSet<m.MX, isF=1>.val in
      defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m, sew=e>,
                SchedUnary<"WriteVFCvtIToFV", "ReadVFCvtIToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}

// Single-width int->FP conversion with static FRM immediate, per-SEW.
multiclass VPseudoVCVTF_RM_V {
  foreach m = MxListF in {
    foreach e = SchedSEWSet<m.MX, isF=1>.val in
      defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m, sew=e>,
                SchedUnary<"WriteVFCvtIToFV", "ReadVFCvtIToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}

// Widening conversions below write a widened register group (m.wvrclass), so
// $rd is early-clobbered to keep it from overlapping the narrower source.
multiclass VPseudoVWCVTI_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

multiclass VPseudoVWCVTI_V_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defm _V : VPseudoConversionRoundingMode<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

multiclass VPseudoVWCVTI_RM_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    defm _V : VPseudoConversionRM<m.wvrclass, m.vrclass, m, constraint>,
              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}
3666
// Widening int->FP conversion, per (LMUL, SEW); widened destination is
// early-clobbered.
multiclass VPseudoVWCVTF_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    foreach e = SchedSEWSet<m.MX, isF=0, isWidening=1>.val in
      defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=e,
                                  TargetConstraintType=3>,
                SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}

// Widening FP->FP (to double-width) conversion, per (LMUL, SEW).
multiclass VPseudoVWCVTD_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=e,
                                  TargetConstraintType=3>,
                SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}

// Narrowing conversions below read a widened source group (m.wvrclass) and
// write the narrower m.vrclass; $rd is early-clobbered.
multiclass VPseudoVNCVTI_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

multiclass VPseudoVNCVTI_W_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

multiclass VPseudoVNCVTI_RM_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListW in {
    defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
                         forceMergeOpRead=true>;
  }
}

// Narrowing int->FP conversion with dynamic rounding mode, per (LMUL, SEW).
multiclass VPseudoVNCVTF_W_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
                                              constraint, sew=e,
                                              TargetConstraintType=2>,
                SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}

// Narrowing int->FP conversion with static FRM immediate, per (LMUL, SEW).
multiclass VPseudoVNCVTF_RM_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint, sew=e>,
                SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}

// Narrowing FP->FP conversion, per (LMUL, SEW).
multiclass VPseudoVNCVTD_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, sew=e,
                                  TargetConstraintType=2>,
                SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}

// Narrowing FP->FP conversion with dynamic rounding mode, per (LMUL, SEW).
multiclass VPseudoVNCVTD_W_RM {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxListFW in {
    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
      defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
                                              constraint, sew=e,
                                              TargetConstraintType=2>,
                SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, e,
                           forceMergeOpRead=true>;
  }
}
3760
// Unit-stride segment load pseudos: one plain and one "_MASK" pseudo per
// (EEW, LMUL, NF) combination, using the tuple register class from
// SegRegClass.  Record names look like "<nf>E<eew>_V_<LMUL>".
multiclass VPseudoUSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo :
            VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}

// Fault-only-first unit-stride segment loads ("FF" in the record name).
multiclass VPseudoUSSegLoadFF {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "FF_V_" # LInfo :
            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadFFMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
        }
      }
    }
  }
}

// Strided segment loads (VLSSEG scheduling classes).
multiclass VPseudoSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>,
                                               VLSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>,
                                                         VLSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3811
// Indexed segment load pseudos (ordered or unordered per the Ordered bit).
// For each (index EEW, data EEW, data EMUL) combination, the index EMUL is
// derived from the data EMUL, and combinations whose index EMUL falls outside
// the legal range are skipped.  Record names encode both the index and data
// LMULs: "<nf>EI<idxEEW>_V_<IdxLMUL>_<DataLMUL>".
multiclass VPseudoISegLoad<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        // Octuple units are LMUL*8, so [1, 64] keeps idxEMUL in [1/8, 8].
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar DataVreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          let VLMul = dataEMUL.value in {
            foreach nf = NFSet<dataEMUL>.L in {
              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
                VPseudoISegLoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                      nf, Ordered>,
                VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
                VPseudoISegLoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                    nf, Ordered>,
                VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
            }
          }
        }
      }
    }
  }
}
3843
// Generate pseudos for unit-stride segment stores.  One unmasked and one
// "_MASK" pseudo per (EEW, LMUL, nf) combination, named
// <nf>E<eew>_V_<LMUL>[_MASK].
multiclass VPseudoUSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          // Register class covering nf consecutive groups of this LMUL.
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>,
                                               VSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>,
                                                         VSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3860
// Generate pseudos for strided segment stores.  Identical structure to
// VPseudoUSSegStore but uses the strided NoMask/Mask pseudo classes and the
// VSSSEG scheduling resources.
multiclass VPseudoSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value, SEW=eew in {
        foreach nf = NFSet<lmul>.L in {
          // Register class covering nf consecutive groups of this LMUL.
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>,
                                               VSSSEGSched<nf, eew, LInfo>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>,
                                                         VSSSEGSched<nf, eew, LInfo>;
        }
      }
    }
  }
}
3877
// Generate pseudos for indexed segment stores (ordered vsoxseg vs. unordered
// vsuxseg, selected by Ordered).  Mirrors VPseudoISegLoad: the index EMUL is
// derived from idxEEW * dataLMUL / dataEEW and illegal combinations are
// skipped.
// NOTE(review): the sched class here is keyed on idxEEW, while
// VPseudoISegLoad keys VLXSEGSched on dataEEW — confirm this asymmetry is
// intentional.
multiclass VPseudoISegStore<bit Ordered> {
  foreach idxEEW = EEWList in {
    foreach dataEEW = EEWList in {
      foreach dataEMUL = MxSet<dataEEW>.m in {
        defvar dataEMULOctuple = dataEMUL.octuple;
        // Calculate emul = eew * lmul / sew
        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
        // Octuple values 1..64 correspond to EMUL in [MF8, M8].
        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
          defvar DataLInfo = dataEMUL.MX;
          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar DataVreg = dataEMUL.vrclass;
          defvar IdxVreg = idxEMUL.vrclass;
          let VLMul = dataEMUL.value in {
            // One unmasked and one "_MASK" pseudo per segment count nf,
            // named <nf>EI<idxEEW>_V_<IdxLMUL>_<DataLMUL>[_MASK].
            foreach nf = NFSet<dataEMUL>.L in {
              // Register class covering nf consecutive groups of data EMUL.
              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
                VPseudoISegStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                       nf, Ordered>,
                VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
                VPseudoISegStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                     nf, Ordered>,
                VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
            }
          }
        }
      }
    }
  }
}
3909
3910//===----------------------------------------------------------------------===//
3911// Helpers to define the intrinsic patterns.
3912//===----------------------------------------------------------------------===//
3913
// Match an unmasked unary intrinsic (merge, rs2, vl) and select the
// corresponding <inst>_<kind>_<LMUL> pseudo.  When isSEWAware is set, the
// pseudo name carries an additional _E<sew> infix, where the SEW is
// reconstructed from log2sew via !shl(1, log2sew).  The pseudo is selected
// with the tail-undisturbed/mask-undisturbed (TU_MU) policy.
class VPatUnaryNoMask<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      int log2sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg op2_reg_class,
                      bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(
                     !if(isSEWAware,
                         inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                         inst#"_"#kind#"_"#vlmul.MX))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, log2sew, TU_MU)>;
3935
// Same as VPatUnaryNoMask, but the intrinsic additionally carries a dynamic
// rounding-mode operand (XLenVT timm:$round) which is threaded through to
// the selected pseudo.
class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op2_type,
                                  int log2sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  VReg op2_reg_class,
                                  bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                          inst#"_"#kind#"_"#vlmul.MX))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, log2sew, TU_MU)>;
3959
// Same as VPatUnaryNoMaskRoundingMode, but matches only when the
// rounding-mode operand is the literal 0b001 (RTZ, per the class name).
// The rounding-mode operand is dropped on the output side: the selected
// pseudo takes no rounding-mode operand.
class VPatUnaryNoMaskRTZ<string intrinsic_name,
                         string inst,
                         string kind,
                         ValueType result_type,
                         ValueType op2_type,
                         int log2sew,
                         LMULInfo vlmul,
                         VReg result_reg_class,
                         VReg op2_reg_class,
                         bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (XLenVT 0b001),
                   VLOpFrag)),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                          inst#"_"#kind#"_"#vlmul.MX))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, log2sew, TU_MU)>;
3982
// Match the "_mask" flavor of a unary intrinsic (merge, rs2, V0 mask, vl,
// policy) and select the corresponding "_MASK" pseudo.  The explicit
// tail/mask policy operand (timm:$policy) is forwarded unchanged.  With
// isSEWAware the pseudo name carries an _E<sew> infix before "_MASK".
class VPatUnaryMask<string intrinsic_name,
                    string inst,
                    string kind,
                    ValueType result_type,
                    ValueType op2_type,
                    ValueType mask_type,
                    int log2sew,
                    LMULInfo vlmul,
                    VReg result_reg_class,
                    VReg op2_reg_class,
                    bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4006
// Same as VPatUnaryMask, but with a dynamic rounding-mode operand
// (timm:$round) between the mask and vl, forwarded to the "_MASK" pseudo.
class VPatUnaryMaskRoundingMode<string intrinsic_name,
                                string inst,
                                string kind,
                                ValueType result_type,
                                ValueType op2_type,
                                ValueType mask_type,
                                int log2sew,
                                LMULInfo vlmul,
                                VReg result_reg_class,
                                VReg op2_reg_class,
                                bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4033
// Same as VPatUnaryMaskRoundingMode, but matches only the literal
// rounding-mode value 0b001 (RTZ, per the class name), which is dropped on
// the output side — the selected "_MASK" pseudo takes no rounding-mode
// operand.
class VPatUnaryMaskRTZ<string intrinsic_name,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int log2sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op2_reg_class,
                       bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   (XLenVT 0b001),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(
                      !if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4059
// Match an unmasked unary operation on mask registers and select the
// corresponding <inst>_M_<BX> pseudo.  Mask operations have no merge or
// policy operand.
class VPatMaskUnaryNoMask<string intrinsic_name,
                          string inst,
                          MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                (mti.Mask VR:$rs2),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX)
                (mti.Mask VR:$rs2),
                GPR:$vl, mti.Log2SEW)>;
4069
// Match the "_mask" flavor of a unary mask-register operation (merge, rs2,
// V0 mask) and select the <inst>_M_<BX>_MASK pseudo.  The policy is
// hard-coded to TU_MU; the intrinsic carries no policy operand.
class VPatMaskUnaryMask<string intrinsic_name,
                        string inst,
                        MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0),
                VLOpFrag)),
                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
4082
// Match a unary intrinsic that takes an arbitrary mask register (VR:$rs2)
// as a regular source operand rather than via V0.  The pseudo name is
// always SEW-aware (_E<sew> suffix); no policy operand is passed.
class VPatUnaryAnyMask<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType mask_type,
                       int log2sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   GPR:$vl, log2sew)>;
4103
// Match a binary intrinsic with no merge operand (rs1, rs2, vl) and select
// the instruction named exactly by `inst`.  No policy operand is passed.
// Note: unlike the unary classes, `sew` here is passed straight through as
// the pseudo's SEW operand.
class VPatBinaryM<string intrinsic_name,
                  string inst,
                  ValueType result_type,
                  ValueType op1_type,
                  ValueType op2_type,
                  int sew,
                  VReg op1_reg_class,
                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
4120
// Match an unmasked binary intrinsic with a merge operand (merge, rs1, rs2,
// vl) and select the instruction named by `inst` with the TU_MU
// (tail-undisturbed/mask-undisturbed) policy.
class VPatBinaryNoMaskTU<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         int sew,
                         VReg result_reg_class,
                         VReg op1_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TU_MU)>;
4140
// Same as VPatBinaryNoMaskTU, but with a dynamic rounding-mode operand
// (timm:$round) that is forwarded to the instruction.
class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
                                     string inst,
                                     ValueType result_type,
                                     ValueType op1_type,
                                     ValueType op2_type,
                                     int sew,
                                     VReg result_reg_class,
                                     VReg op1_reg_class,
                                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, TU_MU)>;
4162
4163
4164// Same as VPatBinaryM but source operands are swapped.
// Same as VPatBinaryM but source operands are swapped: the intrinsic lists
// (rs2, rs1) while the selected instruction takes (rs1, rs2).
class VPatBinaryMSwapped<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         int sew,
                         VReg op1_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   VLOpFrag)),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
4181
// Match the "_mask" flavor of a binary intrinsic (merge, rs1, rs2, V0 mask,
// vl) and select the "_MASK" instruction.  This variant has no policy
// operand on either side (contrast with VPatBinaryMaskPolicy below).
class VPatBinaryMask<string intrinsic_name,
                     string inst,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int sew,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
4203
// Match the "_mask" flavor of a binary intrinsic that carries an explicit
// tail/mask policy operand (timm:$policy), and select the "_MASK"
// instruction forwarding the policy unchanged.
class VPatBinaryMaskPolicy<string intrinsic_name,
                           string inst,
                           ValueType result_type,
                           ValueType op1_type,
                           ValueType op2_type,
                           ValueType mask_type,
                           int sew,
                           VReg result_reg_class,
                           VReg op1_reg_class,
                           DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4225
// Same as VPatBinaryMaskPolicy, but with a dynamic rounding-mode operand
// (timm:$round) between the mask and vl, forwarded to the "_MASK"
// instruction.
class VPatBinaryMaskPolicyRoundingMode<string intrinsic_name,
                                       string inst,
                                       ValueType result_type,
                                       ValueType op1_type,
                                       ValueType op2_type,
                                       ValueType mask_type,
                                       int sew,
                                       VReg result_reg_class,
                                       VReg op1_reg_class,
                                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4250
4251// Same as VPatBinaryMask but source operands are swapped.
// Same as VPatBinaryMask but source operands are swapped: the intrinsic
// lists (rs2, rs1) while the selected "_MASK" instruction takes (rs1, rs2).
class VPatBinaryMaskSwapped<string intrinsic_name,
                            string inst,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            VReg result_reg_class,
                            VReg op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type V0),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
4273
// Match an unmasked binary intrinsic whose merge operand is undef and
// select the "_TIED" instruction, in which the destination is tied to the
// first source ($rs1).  Because the merge is undef, the tail policy is
// TAIL_AGNOSTIC.
class VPatTiedBinaryNoMask<string intrinsic_name,
                           string inst,
                           ValueType result_type,
                           ValueType op2_type,
                           int sew,
                           VReg result_reg_class,
                           DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4290
// Same as VPatTiedBinaryNoMask, but with a dynamic rounding-mode operand
// (timm:$round) forwarded to the "_TIED" instruction.
class VPatTiedBinaryNoMaskRoundingMode<string intrinsic_name,
                                       string inst,
                                       ValueType result_type,
                                       ValueType op2_type,
                                       int sew,
                                       VReg result_reg_class,
                                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type (undef)),
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4309
// Match an unmasked binary intrinsic whose merge operand is the SAME
// register as the first source, and select the "_TIED" instruction with the
// TU_MU policy.  The shared $merge value serves as both the tied
// destination and the first source.
class VPatTiedBinaryNoMaskTU<string intrinsic_name,
                             string inst,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             VReg result_reg_class,
                             DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew, TU_MU)>;
4326
// Same as VPatTiedBinaryNoMaskTU, but with a dynamic rounding-mode operand
// (timm:$round) forwarded to the "_TIED" instruction.
class VPatTiedBinaryNoMaskTURoundingMode<string intrinsic_name,
                                         string inst,
                                         ValueType result_type,
                                         ValueType op2_type,
                                         int sew,
                                         VReg result_reg_class,
                                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   VLOpFrag)),
                   (!cast<Instruction>(inst#"_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, TU_MU)>;
4345
// Match the "_mask" flavor of a binary intrinsic where the merge operand
// equals the first source, and select the "_MASK_TIED" instruction.  The
// explicit policy operand (timm:$policy) is forwarded unchanged.
class VPatTiedBinaryMask<string intrinsic_name,
                         string inst,
                         ValueType result_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int sew,
                         VReg result_reg_class,
                         DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4364
// Same as VPatTiedBinaryMask, but with a dynamic rounding-mode operand
// (timm:$round) forwarded to the "_MASK_TIED" instruction.
class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
                                     string inst,
                                     ValueType result_type,
                                     ValueType op2_type,
                                     ValueType mask_type,
                                     int sew,
                                     VReg result_reg_class,
                                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_MASK_TIED")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT timm:$round),
                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4386
// Match an unmasked ternary intrinsic (rs3, rs1, rs2, vl) where $rs3 is the
// accumulator/tied destination, and select the always-SEW-aware
// <inst>_<kind>_<LMUL>_E<sew> pseudo with the TU_MU policy.
class VPatTernaryNoMaskTU<string intrinsic,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op1_type,
                          ValueType op2_type,
                          int log2sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          RegisterClass op1_reg_class,
                          DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, log2sew, TU_MU)>;
4408
// Same as VPatTernaryNoMaskTU, but with a dynamic rounding-mode operand
// (timm:$round) forwarded to the pseudo.
class VPatTernaryNoMaskTURoundingMode<string intrinsic,
                                      string inst,
                                      string kind,
                                      ValueType result_type,
                                      ValueType op1_type,
                                      ValueType op2_type,
                                      int log2sew,
                                      LMULInfo vlmul,
                                      VReg result_reg_class,
                                      RegisterClass op1_reg_class,
                                      DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (XLenVT timm:$round),
                    GPR:$vl, log2sew, TU_MU)>;
4432
// Match an unmasked ternary intrinsic that carries an explicit tail/mask
// policy operand (timm:$policy), and select the (non-SEW-aware)
// <inst>_<kind>_<LMUL> pseudo forwarding the policy unchanged.  Note that
// `sew` here is the raw SEW operand value, not log2.
class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  int sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  RegisterClass op1_reg_class,
                                  DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4454
// Same as VPatTernaryNoMaskWithPolicy, but with a dynamic rounding-mode
// operand (timm:$round), and an optional SEW-aware pseudo name (the
// _E<sew> infix is added when isSEWAware is set).
class VPatTernaryNoMaskWithPolicyRoundingMode<string intrinsic,
                                  string inst,
                                  string kind,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  int log2sew,
                                  LMULInfo vlmul,
                                  VReg result_reg_class,
                                  RegisterClass op1_reg_class,
                                  DAGOperand op2_kind,
                                  bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(!if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                          inst#"_"#kind#"_"#vlmul.MX))
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (XLenVT timm:$round),
                    GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4481
// Match the "_mask" flavor of a ternary intrinsic (rs3, rs1, rs2, V0 mask,
// vl, policy) and select the <inst>_<kind>_<LMUL>_MASK pseudo, forwarding
// the explicit policy operand unchanged.
class VPatTernaryMaskPolicy<string intrinsic,
                            string inst,
                            string kind,
                            ValueType result_type,
                            ValueType op1_type,
                            ValueType op2_type,
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
                            VReg result_reg_class,
                            RegisterClass op1_reg_class,
                            DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4506
// Masked ternary pattern with both policy and rounding-mode operands.  When
// isSEWAware, the pseudo name gains an "_E<sew>" suffix before "_MASK".
class VPatTernaryMaskPolicyRoundingMode<string intrinsic,
                                        string inst,
                                        string kind,
                                        ValueType result_type,
                                        ValueType op1_type,
                                        ValueType op2_type,
                                        ValueType mask_type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg result_reg_class,
                                        RegisterClass op1_reg_class,
                                        DAGOperand op2_kind,
                                        bit isSEWAware = 0> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    (XLenVT timm:$round),
                    VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(!if(isSEWAware,
                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew) # "_MASK",
                          inst#"_"#kind#"_"#vlmul.MX # "_MASK"))
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    (XLenVT timm:$round),
                    GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4536
// Masked ternary intrinsic with no policy operand: always selects the
// SEW-suffixed "_MASK" pseudo and pins the policy to TU_MU.
class VPatTernaryMaskTU<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int log2sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, log2sew, TU_MU)>;
4561
// Like VPatTernaryMaskTU (no policy operand, fixed TU_MU), but for
// intrinsics that carry a dynamic rounding-mode operand.
class VPatTernaryMaskTURoundingMode<string intrinsic,
                                    string inst,
                                    string kind,
                                    ValueType result_type,
                                    ValueType op1_type,
                                    ValueType op2_type,
                                    ValueType mask_type,
                                    int log2sew,
                                    LMULInfo vlmul,
                                    VReg result_reg_class,
                                    RegisterClass op1_reg_class,
                                    DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    (XLenVT timm:$round),
                    VLOpFrag)),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    (XLenVT timm:$round),
                    GPR:$vl, log2sew, TU_MU)>;
4588
// Patterns for mask-typed unary operations that produce an XLenVT scalar,
// in unmasked and masked ("_mask") forms, for every mask type.
multiclass VPatUnaryS_M<string intrinsic_name,
                             string inst> {
  foreach mti = AllMasks in {
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
                      (mti.Mask VR:$rs1), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
                      GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
                      (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}
4602
// Unary patterns taking an additional mask-typed operand as a data source
// (the "VM" pseudo variants), one per element type in vtilist.
multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
                           vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
  }
}
4612
// Mask-to-mask unary patterns (unmasked and masked) for all mask types.
multiclass VPatUnaryM_M<string intrinsic,
                         string inst> {
  foreach mti = AllMasks in {
    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
  }
}
4620
// Unary patterns converting a mask source (register class VR) into an
// integer vector result, for all integer vector types.
multiclass VPatUnaryV_M<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
      def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                          vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
    }
  }
}
4631
// Unary patterns whose source operand uses the fractional-LMUL type info
// (fti) paired with each destination type (vti); both types' predicates
// must hold for the pattern to apply.
multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
                         list<VTypeInfoToFraction> fractionList> {
  foreach vtiTofti = fractionList in {
      defvar vti = vtiTofti.Vti;
      defvar fti = vtiTofti.Fti;
      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                   GetVTypePredicates<fti>.Predicates) in {
        def : VPatUnaryNoMask<intrinsic, instruction, suffix,
                              vti.Vector, fti.Vector,
                              vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
        def : VPatUnaryMask<intrinsic, instruction, suffix,
                            vti.Vector, fti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
      }
  }
}
4648
// Plain vector-to-vector unary patterns (unmasked and masked); isSEWAware
// is forwarded so the "_E<sew>"-suffixed pseudo variants can be selected.
multiclass VPatUnaryV_V<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatUnaryNoMask<intrinsic, instruction, "V",
                            vti.Vector, vti.Vector, vti.Log2SEW,
                            vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
      def : VPatUnaryMask<intrinsic, instruction, "V",
                          vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                          vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
    }
  }
}
4662
// Vector-to-vector unary patterns for intrinsics with a rounding-mode
// operand (unmasked and masked forms).
multiclass VPatUnaryV_V_RM<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatUnaryNoMaskRoundingMode<intrinsic, instruction, "V",
                                        vti.Vector, vti.Vector, vti.Log2SEW,
                                        vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
      def : VPatUnaryMaskRoundingMode<intrinsic, instruction, "V",
                                      vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                                      vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
    }
  }
}
4676
// Patterns for vector intrinsics with no source operand beyond the merge
// value: the unmasked form uses the fixed TU_MU policy, the masked form
// takes an explicit policy operand.
multiclass VPatNullaryV<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                            (vti.Vector vti.RegClass:$merge),
                            VLOpFrag)),
                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                            vti.RegClass:$merge, GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                            (vti.Vector vti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                            vti.RegClass:$merge, (vti.Mask V0),
                            GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  }
  }
}
4694
// Patterns for mask-producing intrinsics whose only operand is VL.
multiclass VPatNullaryM<string intrinsic, string inst> {
  foreach mti = AllMasks in
    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
                        VLOpFrag)),
                        (!cast<Instruction>(inst#"_M_"#mti.BX)
                        GPR:$vl, mti.Log2SEW)>;
}
4702
// Unmasked + masked pattern pair for binary operations with a mask result.
// (Delegates to the VPatBinaryM / VPatBinaryMask single-pattern classes.)
multiclass VPatBinaryM<string intrinsic,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      VReg result_reg_class,
                      VReg op1_reg_class,
                      DAGOperand op2_kind> {
  def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type,
                    sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
                       mask_type, sew, result_reg_class, op1_reg_class,
                       op2_kind>;
}
4719
// Standard pattern pair for a binary operation: the tail-undisturbed-capable
// unmasked form plus the masked form with a policy operand.
multiclass VPatBinary<string intrinsic,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      VReg result_reg_class,
                      VReg op1_reg_class,
                      DAGOperand op2_kind> {
  def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
                           sew, result_reg_class, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskPolicy<intrinsic, inst, result_type, op1_type, op2_type,
                             mask_type, sew, result_reg_class, op1_reg_class,
                             op2_kind>;
}
4736
// Like VPatBinary, but for intrinsics that carry a rounding-mode operand.
multiclass VPatBinaryRoundingMode<string intrinsic,
                                  string inst,
                                  ValueType result_type,
                                  ValueType op1_type,
                                  ValueType op2_type,
                                  ValueType mask_type,
                                  int sew,
                                  VReg result_reg_class,
                                  VReg op1_reg_class,
                                  DAGOperand op2_kind> {
  def : VPatBinaryNoMaskTURoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
                                       sew, result_reg_class, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskPolicyRoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
                                         mask_type, sew, result_reg_class, op1_reg_class,
                                         op2_kind>;
}
4753
// Mask-result binary pattern pair matched with swapped operand order
// (delegates to the *Swapped single-pattern classes).
multiclass VPatBinaryMSwapped<string intrinsic,
                              string inst,
                              ValueType result_type,
                              ValueType op1_type,
                              ValueType op2_type,
                              ValueType mask_type,
                              int sew,
                              VReg result_reg_class,
                              VReg op1_reg_class,
                              DAGOperand op2_kind> {
  def : VPatBinaryMSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                           sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
                              mask_type, sew, result_reg_class, op1_reg_class,
                              op2_kind>;
}
4770
// Pattern for carry-in style operations (the V0 mask operand is consumed as
// a data input) that additionally take a merge operand for the tail.
multiclass VPatBinaryCarryInTAIL<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 VReg op1_reg_class,
                                 DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (result_type result_reg_class:$merge),
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
4795
// Pattern for carry-in style operations (the V0 mask operand is consumed as
// a data input) without a merge operand.
multiclass VPatBinaryCarryIn<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
4817
// Pattern for operations producing a mask result with no mask or carry
// input operand.
multiclass VPatBinaryMaskOut<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind> {
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         VLOpFrag)),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         GPR:$vl, sew)>;
}
4837
// Unmasked + masked unary conversion pattern pair.
multiclass VPatConversion<string intrinsic,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op1_type,
                          ValueType mask_type,
                          int log2sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op1_reg_class,
                          bit isSEWAware = 0> {
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                        log2sew, vlmul, result_reg_class, op1_reg_class,
                        isSEWAware>;
  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type,
                      mask_type, log2sew, vlmul, result_reg_class, op1_reg_class,
                      isSEWAware>;
}
4856
// Conversion pattern pair for intrinsics with a rounding-mode operand.
multiclass VPatConversionRoundingMode<string intrinsic,
                                      string inst,
                                      string kind,
                                      ValueType result_type,
                                      ValueType op1_type,
                                      ValueType mask_type,
                                      int log2sew,
                                      LMULInfo vlmul,
                                      VReg result_reg_class,
                                      VReg op1_reg_class,
                                      bit isSEWAware = 0> {
  def : VPatUnaryNoMaskRoundingMode<intrinsic, inst, kind, result_type, op1_type,
                                    log2sew, vlmul, result_reg_class,
                                    op1_reg_class, isSEWAware>;
  def : VPatUnaryMaskRoundingMode<intrinsic, inst, kind, result_type, op1_type,
                                  mask_type, log2sew, vlmul, result_reg_class,
                                  op1_reg_class, isSEWAware>;
}
4875
// Conversion pattern pair using the RTZ (round-towards-zero) pattern
// classes (VPatUnaryNoMaskRTZ / VPatUnaryMaskRTZ).
multiclass VPatConversionRTZ<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType mask_type,
                             int log2sew,
                             LMULInfo vlmul,
                             VReg result_reg_class,
                             VReg op1_reg_class,
                             bit isSEWAware = 0> {
  def : VPatUnaryNoMaskRTZ<intrinsic, inst, kind, result_type, op1_type,
                                    log2sew, vlmul, result_reg_class,
                                    op1_reg_class, isSEWAware>;
  def : VPatUnaryMaskRTZ<intrinsic, inst, kind, result_type, op1_type,
                                  mask_type, log2sew, vlmul, result_reg_class,
                                  op1_reg_class, isSEWAware>;
}
4894
// Unmasked + masked vector-vector binary patterns for every element type in
// vtilist.  When isSEWAware, the pseudo name carries an "_E<sew>" suffix.
multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar name = !if(isSEWAware,
                      instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
                      instruction # "_VV_" # vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic, name,
                      vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.RegClass>;
  }
}
4907
// Vector-vector binary patterns for intrinsics with a rounding-mode
// operand; one unmasked and one masked pattern per element type in vtilist.
multiclass VPatBinaryV_VV_RM<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar name = !if(isSEWAware,
                      instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
                      instruction # "_VV_" # vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryRoundingMode<intrinsic, name,
                                  vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  vti.RegClass, vti.RegClass>;
  }
}
4920
// Vector-vector patterns whose second source operand uses the equally-sized
// integer type info (ivti); the pseudo name always carries an "_E<sew>"
// suffix.
multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar ivti = GetIntVTypeInfo<vti>.Vti;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic,
                      instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
                      vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.RegClass>;
  }
}
4933
// Vector-vector patterns where the second source has a fixed element width
// (eew).  EMUL is computed as LMUL * eew / SEW in eighths ("octuple"), and
// patterns are only emitted when EMUL lies in [1/8, 8] (octuple 1..64).
multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
                                  int eew, list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // emul = lmul * eew / sew
    defvar vlmul = vti.LMul;
    defvar octuple_lmul = vlmul.octuple;
    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
      defvar emul_str = octuple_to_str<octuple_emul>.ret;
      // Second-source type info is looked up by name, e.g. "VI16MF2".
      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                   GetVTypePredicates<ivti>.Predicates) in
      defm : VPatBinary<intrinsic, inst,
                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, ivti.RegClass>;
    }
  }
}
4954
// Unmasked + masked vector-scalar binary patterns; the scalar operand kind
// comes from vti.ScalarSuffix, and isSEWAware selects the "_E<sew>" names.
multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defvar name = !if(isSEWAware,
                      instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                      instruction#"_"#kind#"_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic, name,
                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}
4969
// Vector-scalar binary patterns for intrinsics with a rounding-mode
// operand; scalar kind from vti.ScalarSuffix, optional "_E<sew>" names.
multiclass VPatBinaryV_VX_RM<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defvar name = !if(isSEWAware,
                      instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                      instruction#"_"#kind#"_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryRoundingMode<intrinsic, name,
                                  vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  vti.RegClass, vti.ScalarRegClass>;
  }
}
4984
// Vector-scalar patterns where the scalar operand is a plain XLenVT GPR.
multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, GPR>;
}
4994
// Vector-immediate patterns; imm_type constrains the immediate operand.
multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, Operand imm_type> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, imm_type>;
}
5004
// Vector-immediate patterns for intrinsics with a rounding-mode operand.
multiclass VPatBinaryV_VI_RM<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist,
                             Operand imm_type> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryRoundingMode<intrinsic,
                                  instruction # "_VI_" # vti.LMul.MX,
                                  vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  vti.RegClass, imm_type>;
}
5016
// Mask-mask binary patterns over all mask types; gated only on
// HasVInstructions.
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
  foreach mti = AllMasks in
    let Predicates = [HasVInstructions] in
    def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
                      mti.Mask, mti.Mask, mti.Mask,
                      mti.Log2SEW, VR, VR>;
}
5024
// Widening vector-vector patterns: both sources use Vti, the result uses
// the widened Wti; both types' predicates must hold.
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, Vti.RegClass>;
  }
}
5038
// Widening vector-vector patterns for intrinsics with a rounding-mode
// operand; isSEWAware selects the "_E<sew>"-suffixed pseudo names.
multiclass VPatBinaryW_VV_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar name = !if(isSEWAware,
                      instruction # "_VV_" # Vti.LMul.MX # "_E" # Vti.SEW,
                      instruction # "_VV_" # Vti.LMul.MX);
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, name,
                                  Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                                  Vti.Log2SEW, Wti.RegClass,
                                  Vti.RegClass, Vti.RegClass>;
  }
}
5055
// Widening vector-scalar patterns; the scalar operand kind comes from
// Vti.ScalarSuffix.
multiclass VPatBinaryW_VX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, Vti.ScalarRegClass>;
  }
}
5070
// Widening vector-scalar patterns for intrinsics with a rounding-mode
// operand; isSEWAware selects the "_E<sew>"-suffixed pseudo names.
multiclass VPatBinaryW_VX_RM<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    defvar name = !if(isSEWAware,
                      instruction#"_"#kind#"_"#Vti.LMul.MX # "_E" # Vti.SEW,
                      instruction#"_"#kind#"_"#Vti.LMul.MX);
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, name,
                                  Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                                  Vti.Log2SEW, Wti.RegClass,
                                  Vti.RegClass, Vti.ScalarRegClass>;
  }
}
5088
// Patterns for ops whose first source is already widened (W x V -> W).
// The tied tail-undisturbed and tied masked forms carry AddedComplexity = 1
// so instruction selection prefers them over the untied forms when both
// could match.
multiclass VPatBinaryW_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in {
      def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                 Wti.Vector, Vti.Vector,
                                 Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      def : VPatBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
                               Wti.RegClass, Wti.RegClass, Vti.RegClass>;
      let AddedComplexity = 1 in {
      def : VPatTiedBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                   Wti.Vector, Vti.Vector,
                                   Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                               Wti.Vector, Vti.Vector, Vti.Mask,
                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      }
      def : VPatBinaryMaskPolicy<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                                 Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                                 Vti.Log2SEW, Wti.RegClass,
                                 Wti.RegClass, Vti.RegClass>;
    }
  }
}
5117
// Rounding-mode variant of VPatBinaryW_WV. When isSEWAware is set, the pseudo
// name additionally carries an "_E<SEW>" suffix.
multiclass VPatBinaryW_WV_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar name = !if(isSEWAware,
                      instruction # "_WV_" # Vti.LMul.MX # "_E" # Vti.SEW,
                      instruction # "_WV_" # Vti.LMul.MX);
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in {
      // Unmasked form with the destination tied to the wide source operand.
      def : VPatTiedBinaryNoMaskRoundingMode<intrinsic, name,
                                             Wti.Vector, Vti.Vector,
                                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      // Unmasked TU form carrying a separate passthru operand.
      def : VPatBinaryNoMaskTURoundingMode<intrinsic, name,
                                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
                                           Wti.RegClass, Wti.RegClass, Vti.RegClass>;
      // Give the tied forms higher selection priority (see VPatBinaryW_WV).
      let AddedComplexity = 1 in {
      def : VPatTiedBinaryNoMaskTURoundingMode<intrinsic, name,
                                               Wti.Vector, Vti.Vector,
                                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      def : VPatTiedBinaryMaskRoundingMode<intrinsic, name,
                                           Wti.Vector, Vti.Vector, Vti.Mask,
                                           Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
      }
      // Masked form with a mask/tail policy operand.
      def : VPatBinaryMaskPolicyRoundingMode<intrinsic, name,
                                             Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                                             Vti.Log2SEW, Wti.RegClass,
                                             Wti.RegClass, Vti.RegClass>;
    }
  }
}
5149
// vop.wx patterns: wide (2*SEW) destination and first source operand, narrow
// scalar second source operand.
multiclass VPatBinaryW_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    // Pseudo name encodes the operand kind ("WX"/"WF") and LMUL.
    defvar instName = instruction # "_W" # vti.ScalarSuffix # "_" # vti.LMul.MX;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinary<intrinsic, instName,
                      wti.Vector, wti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, wti.RegClass,
                      wti.RegClass, vti.ScalarRegClass>;
  }
}
5164
// Rounding-mode variant of VPatBinaryW_WX. When isSEWAware is set, the pseudo
// name additionally carries an "_E<SEW>" suffix.
multiclass VPatBinaryW_WX_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar base = instruction # "_W" # vti.ScalarSuffix # "_" # vti.LMul.MX;
    defvar name = !if(isSEWAware, base # "_E" # vti.SEW, base);
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, name,
                                  wti.Vector, wti.Vector, vti.Scalar, vti.Mask,
                                  vti.Log2SEW, wti.RegClass,
                                  wti.RegClass, vti.ScalarRegClass>;
  }
}
5182
// Narrowing vop.wv patterns: narrow (SEW) destination, wide (2*SEW) first
// source operand, narrow second source operand.
multiclass VPatBinaryV_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar instName = instruction # "_WV_" # vti.LMul.MX;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinary<intrinsic, instName,
                      vti.Vector, wti.Vector, vti.Vector, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      wti.RegClass, vti.RegClass>;
  }
}
5196
// Rounding-mode variant of VPatBinaryV_WV (narrowing vop.wv).
multiclass VPatBinaryV_WV_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar instName = instruction # "_WV_" # vti.LMul.MX;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, instName,
                                  vti.Vector, wti.Vector, vti.Vector, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  wti.RegClass, vti.RegClass>;
  }
}
5211
// Narrowing vop.wx patterns: narrow destination, wide first source operand,
// scalar second source operand.
multiclass VPatBinaryV_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    // Pseudo name encodes the operand kind ("WX"/"WF") and LMUL.
    defvar instName = instruction # "_W" # vti.ScalarSuffix # "_" # vti.LMul.MX;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinary<intrinsic, instName,
                      vti.Vector, wti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      wti.RegClass, vti.ScalarRegClass>;
  }
}
5226
// Rounding-mode variant of VPatBinaryV_WX (narrowing vop.wx).
multiclass VPatBinaryV_WX_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar instName = instruction # "_W" # vti.ScalarSuffix # "_" # vti.LMul.MX;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, instName,
                                  vti.Vector, wti.Vector, vti.Scalar, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  wti.RegClass, vti.ScalarRegClass>;
  }
}
5242
5243
// Narrowing vop.wi patterns: narrow destination, wide first source operand,
// 5-bit unsigned immediate second source operand.
multiclass VPatBinaryV_WI<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar instName = instruction # "_WI_" # vti.LMul.MX;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinary<intrinsic, instName,
                      vti.Vector, wti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      wti.RegClass, uimm5>;
  }
}
5257
// Rounding-mode variant of VPatBinaryV_WI (narrowing vop.wi).
multiclass VPatBinaryV_WI_RM<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar instName = instruction # "_WI_" # vti.LMul.MX;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatBinaryRoundingMode<intrinsic, instName,
                                  vti.Vector, wti.Vector, XLenVT, vti.Mask,
                                  vti.Log2SEW, vti.RegClass,
                                  wti.RegClass, uimm5>;
  }
}
5272
// Carry-in pattern families (.vvm/.vxm/.vim forms, e.g. vadc/vmadc). When
// CarryOut is set, the result type is the mask type instead of the vector
// type.
multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Vector, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Vector-scalar carry-in patterns; the operand-kind string becomes "VXM" or
// "VFM" depending on the type's ScalarSuffix.
multiclass VPatBinaryV_XM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryIn<intrinsic, instruction,
                             "V"#vti.ScalarSuffix#"M",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Scalar, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.ScalarRegClass>;
}

// Vector-immediate carry-in patterns (simm5 operand); integer vectors only.
multiclass VPatBinaryV_IM<string intrinsic, string instruction,
                          bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, XLenVT, vti.Mask,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
5308
// Carry-in patterns for pseudos that also take a passthru (tail) operand —
// note the extra result register class argument compared to
// VPatBinaryCarryIn above. Integer vectors only.
multiclass VPatBinaryV_VM_TAIL<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VVM",
                                 vti.Vector,
                                 vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}

// Vector-scalar variant; the operand-kind string is "VXM"/"VFM" depending on
// the type's ScalarSuffix.
multiclass VPatBinaryV_XM_TAIL<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix#"M",
                                 vti.Vector,
                                 vti.Vector, vti.Scalar, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.ScalarRegClass>;
}

// Vector-immediate variant (simm5 operand).
multiclass VPatBinaryV_IM_TAIL<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VIM",
                                 vti.Vector,
                                 vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul,
                                 vti.RegClass, vti.RegClass, simm5>;
}
5339
// Patterns producing a mask result with no mask/carry input (the
// VPatBinaryMaskOut helper): result type is vti.Mask. Integer vectors only.
multiclass VPatBinaryV_V<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
                             vti.Mask, vti.Vector, vti.Vector,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Vector-scalar (GPR) variant.
multiclass VPatBinaryV_X<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, GPR>;
}

// Vector-immediate (simm5) variant.
multiclass VPatBinaryV_I<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.Log2SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
5366
// Mask-producing (comparison-style) binary patterns: the result type is the
// mask type and the result register class is VR.
multiclass VPatBinaryM_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryM<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.RegClass>;
}

// Same as above, but matches the intrinsic with its two vector operands
// swapped (VPatBinaryMSwapped helper).
multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryMSwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                              vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                              vti.Log2SEW, VR,
                              vti.RegClass, vti.RegClass>;
}

// Vector-scalar comparison patterns; kind is "VX" or "VF" via ScalarSuffix.
multiclass VPatBinaryM_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryM<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, vti.ScalarRegClass>;
  }
}

// Vector-immediate (simm5) comparison patterns.
multiclass VPatBinaryM_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinaryM<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, XLenVT, vti.Mask,
                       vti.Log2SEW, VR,
                       vti.RegClass, simm5>;
}
5408
// Convenience multiclasses: each one below simply concatenates the
// single-format pattern multiclasses defined above to cover an instruction's
// full set of operand forms (.vv/.vx/.vi, .wv/.wx/.wi, carry-in, mask-out).
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryV_VV_VX_VI_RM<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist, Operand ImmType>
    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI_RM<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist, bit isSEWAware = 0>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist, isSEWAware>,
      VPatBinaryV_VX<intrinsic, instruction, vtilist, isSEWAware>;

multiclass VPatBinaryV_VV_VX_RM<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, bit isSEWAware = 0>
    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;

multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;

multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX<intrinsic, instruction, vtilist>;

multiclass
    VPatBinaryW_VV_VX_RM<string intrinsic, string instruction,
                         list<VTypeInfoToWide> vtilist, bit isSEWAware = 0>
    : VPatBinaryW_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
      VPatBinaryW_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;

multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
      VPatBinaryW_WX<intrinsic, instruction, vtilist>;

multiclass
    VPatBinaryW_WV_WX_RM<string intrinsic, string instruction,
                         list<VTypeInfoToWide> vtilist, bit isSEWAware = 0>
    : VPatBinaryW_WV_RM<intrinsic, instruction, vtilist, isSEWAware>,
      VPatBinaryW_WX_RM<intrinsic, instruction, vtilist, isSEWAware>;

multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI<intrinsic, instruction, vtilist>;

multiclass VPatBinaryV_WV_WX_WI_RM<string intrinsic, string instruction,
                                   list<VTypeInfoToWide> vtilist>
    : VPatBinaryV_WV_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_WX_RM<intrinsic, instruction, vtilist>,
      VPatBinaryV_WI_RM<intrinsic, instruction, vtilist>;

// Carry-in ops with vector result and passthru operand (e.g. vmerge/vadc-style
// TAIL variants).
multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>,
      VPatBinaryV_IM_TAIL<intrinsic, instruction>;

// Carry-in ops producing a mask result (CarryOut=1, e.g. vmadc with carry).
multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>,
      VPatBinaryV_IM<intrinsic, instruction, CarryOut=1>;

// Mask-result ops with no carry input.
multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>,
      VPatBinaryV_I<intrinsic, instruction>;

multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
      VPatBinaryV_XM_TAIL<intrinsic, instruction>;

multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>;

multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
    : VPatBinaryV_V<intrinsic, instruction>,
      VPatBinaryV_X<intrinsic, instruction>;
5496
// Emits the unmasked-with-policy and masked-with-policy patterns for a
// ternary operation (destination is also a source operand).
multiclass VPatTernaryWithPolicy<string intrinsic,
                                 string inst,
                                 string kind,
                                 ValueType result_type,
                                 ValueType op1_type,
                                 ValueType op2_type,
                                 ValueType mask_type,
                                 int sew,
                                 LMULInfo vlmul,
                                 VReg result_reg_class,
                                 RegisterClass op1_reg_class,
                                 DAGOperand op2_kind> {
  def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
                                    op2_type, sew, vlmul, result_reg_class,
                                    op1_reg_class, op2_kind>;
  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                              op2_kind>;
}
5516
// Rounding-mode variant of VPatTernaryWithPolicy; isSEWAware is forwarded to
// the underlying pattern classes (which select SEW-suffixed pseudo names).
multiclass VPatTernaryWithPolicyRoundingMode<string intrinsic,
                                             string inst,
                                             string kind,
                                             ValueType result_type,
                                             ValueType op1_type,
                                             ValueType op2_type,
                                             ValueType mask_type,
                                             int sew,
                                             LMULInfo vlmul,
                                             VReg result_reg_class,
                                             RegisterClass op1_reg_class,
                                             DAGOperand op2_kind,
                                             bit isSEWAware = 0> {
  def : VPatTernaryNoMaskWithPolicyRoundingMode<intrinsic, inst, kind, result_type,
                                                op1_type, op2_type, sew, vlmul,
                                                result_reg_class, op1_reg_class,
                                                op2_kind, isSEWAware>;
  def : VPatTernaryMaskPolicyRoundingMode<intrinsic, inst, kind, result_type, op1_type,
                                                op2_type, mask_type, sew, vlmul,
                                                result_reg_class, op1_reg_class,
                                                op2_kind, isSEWAware>;
}
5539
// Emits the unmasked and masked TU (tail-undisturbed) patterns for a ternary
// operation; used for ops without a policy operand (e.g. reductions below).
multiclass VPatTernaryTU<string intrinsic,
                         string inst,
                         string kind,
                         ValueType result_type,
                         ValueType op1_type,
                         ValueType op2_type,
                         ValueType mask_type,
                         int log2sew,
                         LMULInfo vlmul,
                         VReg result_reg_class,
                         RegisterClass op1_reg_class,
                         DAGOperand op2_kind> {
  def : VPatTernaryNoMaskTU<intrinsic, inst, kind, result_type, op1_type,
                            op2_type, log2sew, vlmul, result_reg_class,
                            op1_reg_class, op2_kind>;
  def : VPatTernaryMaskTU<intrinsic, inst, kind, result_type, op1_type,
                          op2_type, mask_type, log2sew, vlmul,
                          result_reg_class, op1_reg_class, op2_kind>;
}
5559
// Rounding-mode variant of VPatTernaryTU.
multiclass VPatTernaryTURoundingMode<string intrinsic,
                                     string inst,
                                     string kind,
                                     ValueType result_type,
                                     ValueType op1_type,
                                     ValueType op2_type,
                                     ValueType mask_type,
                                     int log2sew,
                                     LMULInfo vlmul,
                                     VReg result_reg_class,
                                     RegisterClass op1_reg_class,
                                     DAGOperand op2_kind> {
  def : VPatTernaryNoMaskTURoundingMode<intrinsic, inst, kind, result_type, op1_type,
                            op2_type, log2sew, vlmul, result_reg_class,
                            op1_reg_class, op2_kind>;
  def : VPatTernaryMaskTURoundingMode<intrinsic, inst, kind, result_type, op1_type,
                          op2_type, mask_type, log2sew, vlmul,
                          result_reg_class, op1_reg_class, op2_kind>;
}
5579
// Same-width ternary (multiply-add style) patterns where the destination is
// also a source: result, op1, and op2 all use the element type's vector type.
multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, vti.RegClass>;
}

// Rounding-mode variant of the above.
multiclass VPatTernaryV_VV_AAXA_RM<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
                                             vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                                             vti.Log2SEW, vti.LMul, vti.RegClass,
                                             vti.RegClass, vti.RegClass, isSEWAware>;
}

// Ternary patterns with a GPR (XLenVT) second operand.
multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, GPR>;
}

// Scalar-operand multiply-add style patterns; kind is "VX" or "VF" depending
// on the type's ScalarSuffix, and op1 is the scalar.
multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.ScalarRegClass, vti.RegClass>;
}

// Rounding-mode variant of the above.
multiclass VPatTernaryV_VX_AAXA_RM<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction,
                                             "V"#vti.ScalarSuffix,
                                             vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                                             vti.Log2SEW, vti.LMul, vti.RegClass,
                                             vti.ScalarRegClass, vti.RegClass, isSEWAware>;
}

// Ternary patterns with an immediate second operand.
multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand Imm_type> {
  foreach vti = vtilist in
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
                                 vti.Log2SEW, vti.LMul, vti.RegClass,
                                 vti.RegClass, Imm_type>;
}
5641
// Widening ternary patterns: wide (2*SEW) destination/accumulator, narrow
// (SEW) source operands (e.g. vwmacc).
multiclass VPatTernaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
                                 wti.Vector, vti.Vector, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}

// Rounding-mode variant of the above.
multiclass VPatTernaryW_VV_RM<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
                                             wti.Vector, vti.Vector, vti.Vector,
                                             vti.Mask, vti.Log2SEW, vti.LMul,
                                             wti.RegClass, vti.RegClass,
                                             vti.RegClass, isSEWAware>;
  }
}

// Widening ternary patterns with a scalar first source operand; kind is
// "VX"/"VF" via the type's ScalarSuffix.
multiclass VPatTernaryW_VX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    defm : VPatTernaryWithPolicy<intrinsic, instruction,
                                 "V"#vti.ScalarSuffix,
                                 wti.Vector, vti.Scalar, vti.Vector,
                                 vti.Mask, vti.Log2SEW, vti.LMul,
                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
  }
}

// Rounding-mode variant of the above.
multiclass
    VPatTernaryW_VX_RM<string intrinsic, string instruction,
                       list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in defm
        : VPatTernaryWithPolicyRoundingMode<
              intrinsic, instruction, "V" #vti.ScalarSuffix, wti.Vector,
              vti.Scalar, vti.Vector, vti.Mask, vti.Log2SEW, vti.LMul,
              wti.RegClass, vti.ScalarRegClass, vti.RegClass, isSEWAware>;
  }
}
5700
5701multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
5702                              list<VTypeInfo> vtilist>
5703    : VPatTernaryV_VV_AAXA<intrinsic, instruction, vtilist>,
5704      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;
5705
5706multiclass VPatTernaryV_VV_VX_AAXA_RM<string intrinsic, string instruction,
5707                              list<VTypeInfo> vtilist, bit isSEWAware = 0>
5708    : VPatTernaryV_VV_AAXA_RM<intrinsic, instruction, vtilist, isSEWAware>,
5709      VPatTernaryV_VX_AAXA_RM<intrinsic, instruction, vtilist, isSEWAware>;
5710
5711multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
5712                              list<VTypeInfo> vtilist, Operand Imm_type>
5713    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
5714      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
5715
5716
// Mask-producing binary patterns for all three operand forms
// (".vv", ".vx", ".vi").
multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5722
// Combined widening ".vv" + ".vx" ternary patterns.
multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist>
    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
      VPatTernaryW_VX<intrinsic, instruction, vtilist>;
5727
// Combined widening ".vv" + ".vx" ternary patterns with a rounding-mode
// operand.  Note: unlike the other *_RM wrappers here, isSEWAware defaults
// to 1.
multiclass VPatTernaryW_VV_VX_RM<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist, bit isSEWAware = 1>
    : VPatTernaryW_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
      VPatTernaryW_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;
5732
// Mask-producing binary patterns for the ".vv" and ".vx" forms only.
multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
      VPatBinaryM_VX<intrinsic, instruction, vtilist>;
5737
// Mask-producing binary patterns for the ".vx" and ".vi" forms only
// (instructions with no ".vv" variant).
multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5742
// Binary patterns whose ".vv" and ".vx" forms use distinct intrinsics
// (suffix "_vv"/"_vx").  Note the ".vi" patterns deliberately match the
// "_vx" intrinsic: there is no separate "_vi" intrinsic, so the immediate
// form is selected from the scalar intrinsic when the operand fits ImmType.
multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist, Operand ImmType>
    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
5748
// Single-width reduction (".vs") patterns.  The scalar accumulator/result
// always lives in an LMUL=1 register (VR), so for fractional/M1 source
// types we look up the matching "VI<SEW>M1"/"VF<SEW>M1" type by name, and
// for grouped types we use the VectorM1 field of the group's VTypeInfo.
multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
    // e.g. SEW=32, IsFloat=0 -> VI32M1.
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryTU<intrinsic, instruction, "VS",
                         vectorM1.Vector, vti.Vector,
                         vectorM1.Vector, vti.Mask,
                         vti.Log2SEW, vti.LMul,
                         VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
    let Predicates = GetVTypePredicates<gvti>.Predicates in
    defm : VPatTernaryTU<intrinsic, instruction, "VS",
                         gvti.VectorM1, gvti.Vector,
                         gvti.VectorM1, gvti.Mask,
                         gvti.Log2SEW, gvti.LMul,
                         VR, gvti.RegClass, VR>;
  }
}
5768
// As VPatReductionV_VS, but for reductions that carry a rounding-mode
// operand.
multiclass VPatReductionV_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
    // e.g. SEW=32, IsFloat=1 -> VF32M1.
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatTernaryTURoundingMode<intrinsic, instruction, "VS",
                                     vectorM1.Vector, vti.Vector,
                                     vectorM1.Vector, vti.Mask,
                                     vti.Log2SEW, vti.LMul,
                                     VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
    let Predicates = GetVTypePredicates<gvti>.Predicates in
    defm : VPatTernaryTURoundingMode<intrinsic, instruction, "VS",
                                     gvti.VectorM1, gvti.Vector,
                                     gvti.VectorM1, gvti.Mask,
                                     gvti.Log2SEW, gvti.LMul,
                                     VR, gvti.RegClass, VR>;
  }
}
5788
// Widening reduction (".vs") patterns: the accumulator/result element type
// is twice the source SEW, held in an LMUL=1 register.  Types whose doubled
// SEW would exceed 64 bits are skipped since no such element type exists.
multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      let Predicates = GetVTypePredicates<vti>.Predicates in
      defm : VPatTernaryTU<intrinsic, instruction, "VS",
                           wtiM1.Vector, vti.Vector,
                           wtiM1.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul,
                           wtiM1.RegClass, vti.RegClass,
                           wtiM1.RegClass>;
    }
  }
}
5804
// As VPatReductionW_VS, but for reductions that carry a rounding-mode
// operand.
multiclass VPatReductionW_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
    defvar wtiSEW = !mul(vti.SEW, 2);
    // Skip types whose widened element type (2*SEW) would exceed 64 bits.
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      let Predicates = GetVTypePredicates<vti>.Predicates in
      defm : VPatTernaryTURoundingMode<intrinsic, instruction, "VS",
                                       wtiM1.Vector, vti.Vector,
                                       wtiM1.Vector, vti.Mask,
                                       vti.Log2SEW, vti.LMul,
                                       wtiM1.RegClass, vti.RegClass,
                                       wtiM1.RegClass>;
    }
  }
}
5820
// Single-width float -> integer conversion patterns: for each float type,
// the destination is the same-sized integer type (same LMUL/SEW).
multiclass VPatConversionVI_VF<string intrinsic,
                               string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    defm : VPatConversion<intrinsic, instruction, "V",
                          ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                          fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}
5832
// As VPatConversionVI_VF, but for conversions that carry a rounding-mode
// operand.
multiclass VPatConversionVI_VF_RM<string intrinsic,
                                  string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    defm : VPatConversionRoundingMode<intrinsic, instruction, "V",
                                      ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                                      fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}
5844
// As VPatConversionVI_VF, but for the statically round-towards-zero (RTZ)
// conversion variants, which take no rounding-mode operand.
multiclass VPatConversionVI_VF_RTZ<string intrinsic,
                                   string instruction> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    defm : VPatConversionRTZ<intrinsic, instruction, "V",
                             ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
                             fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}
5856
// Single-width integer -> float conversion patterns with a rounding-mode
// operand; destination is the same-sized float type.
multiclass VPatConversionVF_VI_RM<string intrinsic, string instruction,
                                  bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    defm : VPatConversionRoundingMode<intrinsic, instruction, "V",
                                      fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
                                      ivti.LMul, fvti.RegClass, ivti.RegClass,
                                      isSEWAware>;
  }
}
5869
// Widening float -> integer conversion patterns: the integer destination
// has twice the source element width.
multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    defm : VPatConversion<intrinsic, instruction, "V",
                          iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                          fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}
5881
// As VPatConversionWI_VF, but for conversions that carry a rounding-mode
// operand.
multiclass VPatConversionWI_VF_RM<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    defm : VPatConversionRoundingMode<intrinsic, instruction, "V",
                                      iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                                      fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}
5893
// As VPatConversionWI_VF, but for the statically round-towards-zero (RTZ)
// variants, which take no rounding-mode operand.
multiclass VPatConversionWI_VF_RTZ<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    defm : VPatConversionRTZ<intrinsic, instruction, "V",
                             iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
                             fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}
5905
// Widening integer -> float conversion patterns: the float destination has
// twice the source element width.
multiclass VPatConversionWF_VI<string intrinsic, string instruction,
                               bit isSEWAware = 0> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversion<intrinsic, instruction, "V",
                          fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
                          vti.LMul, fwti.RegClass, vti.RegClass, isSEWAware>;
  }
}
5918
// Widening float -> float conversion patterns (narrow source, wide result).
multiclass VPatConversionWF_VF<string intrinsic, string instruction,
                               bit isSEWAware = 0> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    // Define vfwcvt.f.f.v for f16 when Zvfhmin is enabled: the minimal
    // half-precision extension still provides the widening conversion, so
    // relax the predicate for f16 sources.
    let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                         !listconcat(GetVTypePredicates<fvti>.Predicates,
                                     GetVTypePredicates<fwti>.Predicates)) in
      defm : VPatConversion<intrinsic, instruction, "V",
                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
                            fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
  }
}
5933
// Widening bfloat16 -> float conversion patterns.
multiclass VPatConversionWF_VF_BF<string intrinsic, string instruction,
                                  bit isSEWAware = 0> {
  foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversion<intrinsic, instruction, "V",
                          fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
                          fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
  }
}
5947
// Narrowing float -> integer conversion patterns ("W" suffix: wide source,
// narrow result).
multiclass VPatConversionVI_WF<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversion<intrinsic, instruction, "W",
                          vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                          vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}
5959
// As VPatConversionVI_WF, but for conversions that carry a rounding-mode
// operand.
multiclass VPatConversionVI_WF_RM <string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
                                      vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                                      vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}
5971
// As VPatConversionVI_WF, but for the statically round-towards-zero (RTZ)
// variants, which take no rounding-mode operand.
multiclass VPatConversionVI_WF_RTZ <string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionRTZ<intrinsic, instruction, "W",
                             vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
                             vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}
5983
// Narrowing integer -> float conversion patterns (wide integer source,
// narrow float result) with a rounding-mode operand.
multiclass VPatConversionVF_WI_RM <string intrinsic, string instruction,
                                   bit isSEWAware = 0> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
                                      fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
                                      fvti.LMul, fvti.RegClass, iwti.RegClass,
                                      isSEWAware>;
  }
}
5997
// Narrowing float -> float conversion patterns (wide source, narrow result).
multiclass VPatConversionVF_WF<string intrinsic, string instruction,
                               bit isSEWAware = 0> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversion<intrinsic, instruction, "W",
                          fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                          fvti.LMul, fvti.RegClass, fwti.RegClass, isSEWAware>;
  }
}
6010
// As VPatConversionVF_WF, but with a rounding-mode operand; wlist lets
// callers restrict the set of widenable type pairs.
multiclass VPatConversionVF_WF_RM<string intrinsic, string instruction,
                                   list<VTypeInfoToWide> wlist = AllWidenableFloatVectors,
                                   bit isSEWAware = 0> {
  foreach fvtiToFWti = wlist in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
                                      fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                                      fvti.LMul, fvti.RegClass, fwti.RegClass,
                                      isSEWAware>;
  }
}
6025
// As VPatConversionVF_WF, but for the statically round-towards-zero (RTZ)
// variants; wlist lets callers restrict the set of widenable type pairs.
multiclass VPatConversionVF_WF_RTZ<string intrinsic, string instruction,
                                   list<VTypeInfoToWide> wlist = AllWidenableFloatVectors,
                                   bit isSEWAware = 0> {
  foreach fvtiToFWti = wlist in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionRTZ<intrinsic, instruction, "W",
                             fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                             fvti.LMul, fvti.RegClass, fwti.RegClass, isSEWAware>;
  }
}
6039
// Narrowing float -> bfloat16 conversion patterns with a rounding-mode
// operand.
multiclass VPatConversionVF_WF_BF_RM<string intrinsic, string instruction,
                                     bit isSEWAware = 0> {
  foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
                                      fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
                                      fvti.LMul, fvti.RegClass, fwti.RegClass,
                                      isSEWAware>;
  }
}
6053
// Match an integer-compare intrinsic with an immediate operand onto a
// related compare instruction by folding the immediate: the pattern
// accepts ImmType:$rs2 but emits inst with (DecImm $rs2), i.e. rs2 - 1.
// Presumably used to select compares that lack a direct immediate form
// (e.g. a "less than" from a "less than or equal" with imm - 1); the
// caller is responsible for pairing intrinsic and inst consistently.
multiclass VPatCompare_VI<string intrinsic, string inst,
                          ImmLeaf ImmType> {
  foreach vti = AllIntegerVectors in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
    // Unmasked form.
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
                              (vti.Scalar ImmType:$rs2),
                              VLOpFrag)),
              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                      GPR:$vl, vti.Log2SEW)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
    // Masked form: carries the merge operand and the mask in V0.
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
                                  (vti.Vector vti.RegClass:$rs1),
                                  (vti.Scalar ImmType:$rs2),
                                  (vti.Mask V0),
                                  VLOpFrag)),
              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                          (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}
6077
6078//===----------------------------------------------------------------------===//
6079// Pseudo instructions
6080//===----------------------------------------------------------------------===//
6081
6082let Predicates = [HasVInstructions] in {
6083
6084//===----------------------------------------------------------------------===//
6085// Pseudo Instructions for CodeGen
6086//===----------------------------------------------------------------------===//
6087
// Read VLENB (vector register length in bytes); expands to a CSR read of
// the vlenb register (csrrs rd, vlenb, x0).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
                               [(set GPR:$rd, (riscv_read_vlenb))]>,
                        PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVLENB.Encoding, X0)>,
                        Sched<[WriteRdVLENB]>;
}
6094
// Read the VL CSR (csrrs rd, vl, x0).  Uses = [VL] models the dependency
// on the current VL value for scheduling/dataflow.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
    Uses = [VL] in
def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>,
                   PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVL.Encoding, X0)>;
6099
// Spill/reload pseudos for segment register tuples: one pair per
// (LMUL, NF) combination, taking the tuple register class and a GPR
// address.  Size = 4 * (2*nf - 1) bytes is the expansion size bound —
// presumably nf transfers plus nf-1 address adjustments, 4 bytes each;
// TODO confirm against the expansion pass.
foreach lmul = MxList in {
  foreach nf = NFSet<lmul>.L in {
    defvar vreg = SegRegClass<lmul, nf>.RC;
    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVSPILL" # nf # "_" # lmul.MX :
        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2), []>;
    }
    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2), []>;
    }
  }
}
6115
/// Empty pseudo for RISCVInitUndefPass: defines a vector register (one per
/// register-group size M1..M8) without emitting any code (Size = 0).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 0,
    isCodeGenOnly = 1 in {
  def PseudoRVVInitUndefM1 : Pseudo<(outs VR:$vd), (ins), [], "">;
  def PseudoRVVInitUndefM2 : Pseudo<(outs VRM2:$vd), (ins), [], "">;
  def PseudoRVVInitUndefM4 : Pseudo<(outs VRM4:$vd), (ins), [], "">;
  def PseudoRVVInitUndefM8 : Pseudo<(outs VRM8:$vd), (ins), [], "">;
}
6124
6125//===----------------------------------------------------------------------===//
6126// 6. Configuration-Setting Instructions
6127//===----------------------------------------------------------------------===//
6128
6129// Pseudos.
6130let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
6131// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
6132// the when we aren't using one of the special X0 encodings. Otherwise it could
6133// be accidentally be made X0 by MachineIR optimizations. To satisfy the
6134// verifier, we also need a GPRX0 instruction for the special encodings.
6135def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>,
6136                    Sched<[WriteVSETVLI, ReadVSETVLI]>;
6137def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>,
6138                      Sched<[WriteVSETVLI, ReadVSETVLI]>;
6139def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>,
6140                     Sched<[WriteVSETIVLI]>;
6141}
6142
6143//===----------------------------------------------------------------------===//
6144// 7. Vector Loads and Stores
6145//===----------------------------------------------------------------------===//
6146
6147//===----------------------------------------------------------------------===//
6148// 7.4 Vector Unit-Stride Instructions
6149//===----------------------------------------------------------------------===//
6150
6151// Pseudos Unit-Stride Loads and Stores
6152defm PseudoVL : VPseudoUSLoad;
6153defm PseudoVS : VPseudoUSStore;
6154
6155defm PseudoVLM : VPseudoLoadMask;
6156defm PseudoVSM : VPseudoStoreMask;
6157
6158//===----------------------------------------------------------------------===//
6159// 7.5 Vector Strided Instructions
6160//===----------------------------------------------------------------------===//
6161
6162// Vector Strided Loads and Stores
6163defm PseudoVLS : VPseudoSLoad;
6164defm PseudoVSS : VPseudoSStore;
6165
6166//===----------------------------------------------------------------------===//
6167// 7.6 Vector Indexed Instructions
6168//===----------------------------------------------------------------------===//
6169
6170// Vector Indexed Loads and Stores
6171defm PseudoVLUX : VPseudoILoad<Ordered=false>;
6172defm PseudoVLOX : VPseudoILoad<Ordered=true>;
6173defm PseudoVSOX : VPseudoIStore<Ordered=true>;
6174defm PseudoVSUX : VPseudoIStore<Ordered=false>;
6175
6176//===----------------------------------------------------------------------===//
6177// 7.7. Unit-stride Fault-Only-First Loads
6178//===----------------------------------------------------------------------===//
6179
6180// vleff may update VL register
6181let Defs = [VL] in
6182defm PseudoVL : VPseudoFFLoad;
6183
6184//===----------------------------------------------------------------------===//
6185// 7.8. Vector Load/Store Segment Instructions
6186//===----------------------------------------------------------------------===//
6187defm PseudoVLSEG : VPseudoUSSegLoad;
6188defm PseudoVLSSEG : VPseudoSSegLoad;
6189defm PseudoVLOXSEG : VPseudoISegLoad<Ordered=true>;
6190defm PseudoVLUXSEG : VPseudoISegLoad<Ordered=false>;
6191defm PseudoVSSEG : VPseudoUSSegStore;
6192defm PseudoVSSSEG : VPseudoSSegStore;
6193defm PseudoVSOXSEG : VPseudoISegStore<Ordered=true>;
6194defm PseudoVSUXSEG : VPseudoISegStore<Ordered=false>;
6195
6196// vlseg<nf>e<eew>ff.v may update VL register
6197let Defs = [VL] in {
6198defm PseudoVLSEG : VPseudoUSSegLoadFF;
6199}
6200
6201//===----------------------------------------------------------------------===//
6202// 11. Vector Integer Arithmetic Instructions
6203//===----------------------------------------------------------------------===//
6204
6205//===----------------------------------------------------------------------===//
6206// 11.1. Vector Single-Width Integer Add and Subtract
6207//===----------------------------------------------------------------------===//
6208defm PseudoVADD   : VPseudoVALU_VV_VX_VI<Commutable=1>;
6209defm PseudoVSUB   : VPseudoVALU_VV_VX;
6210defm PseudoVRSUB  : VPseudoVALU_VX_VI;
6211
6212foreach vti = AllIntegerVectors in {
6213  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
6214  // Occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
6215  // to use a more complex splat sequence. Add the pattern for all VTs for
6216  // consistency.
6217  let Predicates = GetVTypePredicates<vti>.Predicates in {
6218    def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
6219                                           (vti.Vector vti.RegClass:$rs2),
6220                                           (vti.Vector vti.RegClass:$rs1),
6221                                           VLOpFrag)),
6222              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX)
6223                                                        vti.RegClass:$merge,
6224                                                        vti.RegClass:$rs1,
6225                                                        vti.RegClass:$rs2,
6226                                                        GPR:$vl,
6227                                                        vti.Log2SEW, TU_MU)>;
6228    def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
6229                                                (vti.Vector vti.RegClass:$rs2),
6230                                                (vti.Vector vti.RegClass:$rs1),
6231                                                (vti.Mask V0),
6232                                                VLOpFrag,
6233                                                (XLenVT timm:$policy))),
6234              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
6235                                                        vti.RegClass:$merge,
6236                                                        vti.RegClass:$rs1,
6237                                                        vti.RegClass:$rs2,
6238                                                        (vti.Mask V0),
6239                                                        GPR:$vl,
6240                                                        vti.Log2SEW,
6241                                                        (XLenVT timm:$policy))>;
6242
6243    // Match VSUB with a small immediate to vadd.vi by negating the immediate.
6244    def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$merge),
6245                                          (vti.Vector vti.RegClass:$rs1),
6246                                          (vti.Scalar simm5_plus1:$rs2),
6247                                          VLOpFrag)),
6248              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX)
6249                                                      vti.RegClass:$merge,
6250                                                      vti.RegClass:$rs1,
6251                                                      (NegImm simm5_plus1:$rs2),
6252                                                      GPR:$vl,
6253                                                      vti.Log2SEW, TU_MU)>;
6254    def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
6255                                               (vti.Vector vti.RegClass:$rs1),
6256                                               (vti.Scalar simm5_plus1:$rs2),
6257                                               (vti.Mask V0),
6258                                               VLOpFrag,
6259                                               (XLenVT timm:$policy))),
6260              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
6261                                                        vti.RegClass:$merge,
6262                                                        vti.RegClass:$rs1,
6263                                                        (NegImm simm5_plus1:$rs2),
6264                                                        (vti.Mask V0),
6265                                                        GPR:$vl,
6266                                                        vti.Log2SEW,
6267                                                        (XLenVT timm:$policy))>;
6268  }
6269}
6270
6271//===----------------------------------------------------------------------===//
6272// 11.2. Vector Widening Integer Add/Subtract
6273//===----------------------------------------------------------------------===//
6274defm PseudoVWADDU : VPseudoVWALU_VV_VX<Commutable=1>;
6275defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
6276defm PseudoVWADD  : VPseudoVWALU_VV_VX<Commutable=1>;
6277defm PseudoVWSUB  : VPseudoVWALU_VV_VX;
6278defm PseudoVWADDU : VPseudoVWALU_WV_WX;
6279defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
6280defm PseudoVWADD  : VPseudoVWALU_WV_WX;
6281defm PseudoVWSUB  : VPseudoVWALU_WV_WX;
6282
6283//===----------------------------------------------------------------------===//
6284// 11.3. Vector Integer Extension
6285//===----------------------------------------------------------------------===//
6286defm PseudoVZEXT_VF2 : PseudoVEXT_VF2;
6287defm PseudoVZEXT_VF4 : PseudoVEXT_VF4;
6288defm PseudoVZEXT_VF8 : PseudoVEXT_VF8;
6289defm PseudoVSEXT_VF2 : PseudoVEXT_VF2;
6290defm PseudoVSEXT_VF4 : PseudoVEXT_VF4;
6291defm PseudoVSEXT_VF8 : PseudoVEXT_VF8;
6292
6293//===----------------------------------------------------------------------===//
6294// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
6295//===----------------------------------------------------------------------===//
6296defm PseudoVADC  : VPseudoVCALU_VM_XM_IM;
6297defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM;
6298defm PseudoVMADC : VPseudoVCALUM_V_X_I;
6299
6300defm PseudoVSBC  : VPseudoVCALU_VM_XM;
6301defm PseudoVMSBC : VPseudoVCALUM_VM_XM;
6302defm PseudoVMSBC : VPseudoVCALUM_V_X;
6303
6304//===----------------------------------------------------------------------===//
6305// 11.5. Vector Bitwise Logical Instructions
6306//===----------------------------------------------------------------------===//
6307defm PseudoVAND : VPseudoVALU_VV_VX_VI<Commutable=1>;
6308defm PseudoVOR  : VPseudoVALU_VV_VX_VI<Commutable=1>;
6309defm PseudoVXOR : VPseudoVALU_VV_VX_VI<Commutable=1>;
6310
6311//===----------------------------------------------------------------------===//
6312// 11.6. Vector Single-Width Bit Shift Instructions
6313//===----------------------------------------------------------------------===//
6314defm PseudoVSLL : VPseudoVSHT_VV_VX_VI;
6315defm PseudoVSRL : VPseudoVSHT_VV_VX_VI;
6316defm PseudoVSRA : VPseudoVSHT_VV_VX_VI;
6317
6318//===----------------------------------------------------------------------===//
6319// 11.7. Vector Narrowing Integer Right Shift Instructions
6320//===----------------------------------------------------------------------===//
6321defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI;
6322defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;
6323
6324//===----------------------------------------------------------------------===//
6325// 11.8. Vector Integer Comparison Instructions
6326//===----------------------------------------------------------------------===//
6327defm PseudoVMSEQ  : VPseudoVCMPM_VV_VX_VI<Commutable=1>;
6328defm PseudoVMSNE  : VPseudoVCMPM_VV_VX_VI<Commutable=1>;
6329defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
6330defm PseudoVMSLT  : VPseudoVCMPM_VV_VX;
6331defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
6332defm PseudoVMSLE  : VPseudoVCMPM_VV_VX_VI;
6333defm PseudoVMSGTU : VPseudoVCMPM_VX_VI;
6334defm PseudoVMSGT  : VPseudoVCMPM_VX_VI;
6335
6336//===----------------------------------------------------------------------===//
6337// 11.9. Vector Integer Min/Max Instructions
6338//===----------------------------------------------------------------------===//
6339defm PseudoVMINU : VPseudoVMINMAX_VV_VX;
6340defm PseudoVMIN  : VPseudoVMINMAX_VV_VX;
6341defm PseudoVMAXU : VPseudoVMINMAX_VV_VX;
6342defm PseudoVMAX  : VPseudoVMINMAX_VV_VX;
6343
6344//===----------------------------------------------------------------------===//
6345// 11.10. Vector Single-Width Integer Multiply Instructions
6346//===----------------------------------------------------------------------===//
6347defm PseudoVMUL    : VPseudoVMUL_VV_VX<Commutable=1>;
6348defm PseudoVMULH   : VPseudoVMUL_VV_VX<Commutable=1>;
6349defm PseudoVMULHU  : VPseudoVMUL_VV_VX<Commutable=1>;
6350defm PseudoVMULHSU : VPseudoVMUL_VV_VX;
6351
6352//===----------------------------------------------------------------------===//
6353// 11.11. Vector Integer Divide Instructions
6354//===----------------------------------------------------------------------===//
6355defm PseudoVDIVU : VPseudoVDIV_VV_VX;
6356defm PseudoVDIV  : VPseudoVDIV_VV_VX;
6357defm PseudoVREMU : VPseudoVDIV_VV_VX;
6358defm PseudoVREM  : VPseudoVDIV_VV_VX;
6359
6360//===----------------------------------------------------------------------===//
6361// 11.12. Vector Widening Integer Multiply Instructions
6362//===----------------------------------------------------------------------===//
6363defm PseudoVWMUL   : VPseudoVWMUL_VV_VX<Commutable=1>;
6364defm PseudoVWMULU  : VPseudoVWMUL_VV_VX<Commutable=1>;
6365defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;
6366
6367//===----------------------------------------------------------------------===//
6368// 11.13. Vector Single-Width Integer Multiply-Add Instructions
6369//===----------------------------------------------------------------------===//
6370defm PseudoVMACC  : VPseudoVMAC_VV_VX_AAXA;
6371defm PseudoVNMSAC : VPseudoVMAC_VV_VX_AAXA;
6372defm PseudoVMADD  : VPseudoVMAC_VV_VX_AAXA;
6373defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;
6374
6375//===----------------------------------------------------------------------===//
6376// 11.14. Vector Widening Integer Multiply-Add Instructions
6377//===----------------------------------------------------------------------===//
6378defm PseudoVWMACCU  : VPseudoVWMAC_VV_VX<Commutable=1>;
6379defm PseudoVWMACC   : VPseudoVWMAC_VV_VX<Commutable=1>;
6380defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
6381defm PseudoVWMACCUS : VPseudoVWMAC_VX;
6382
6383//===----------------------------------------------------------------------===//
6384// 11.15. Vector Integer Merge Instructions
6385//===----------------------------------------------------------------------===//
6386defm PseudoVMERGE : VPseudoVMRG_VM_XM_IM;
6387
6388//===----------------------------------------------------------------------===//
6389// 11.16. Vector Integer Move Instructions
6390//===----------------------------------------------------------------------===//
6391defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;
6392
6393//===----------------------------------------------------------------------===//
6394// 12. Vector Fixed-Point Arithmetic Instructions
6395//===----------------------------------------------------------------------===//
6396
6397//===----------------------------------------------------------------------===//
6398// 12.1. Vector Single-Width Saturating Add and Subtract
6399//===----------------------------------------------------------------------===//
6400let Defs = [VXSAT] in {
6401  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI<Commutable=1>;
6402  defm PseudoVSADD  : VPseudoVSALU_VV_VX_VI<Commutable=1>;
6403  defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
6404  defm PseudoVSSUB  : VPseudoVSALU_VV_VX;
6405}
6406
6407//===----------------------------------------------------------------------===//
6408// 12.2. Vector Single-Width Averaging Add and Subtract
6409//===----------------------------------------------------------------------===//
6410defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM<Commutable=1>;
6411defm PseudoVAADD  : VPseudoVAALU_VV_VX_RM<Commutable=1>;
6412defm PseudoVASUBU : VPseudoVAALU_VV_VX_RM;
6413defm PseudoVASUB  : VPseudoVAALU_VV_VX_RM;
6414
6415//===----------------------------------------------------------------------===//
6416// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
6417//===----------------------------------------------------------------------===//
6418let Defs = [VXSAT] in {
6419  defm PseudoVSMUL : VPseudoVSMUL_VV_VX_RM;
6420}
6421
6422//===----------------------------------------------------------------------===//
6423// 12.4. Vector Single-Width Scaling Shift Instructions
6424//===----------------------------------------------------------------------===//
6425defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM;
6426defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM;
6427
6428//===----------------------------------------------------------------------===//
6429// 12.5. Vector Narrowing Fixed-Point Clip Instructions
6430//===----------------------------------------------------------------------===//
6431let Defs = [VXSAT] in {
6432  defm PseudoVNCLIP  : VPseudoVNCLP_WV_WX_WI_RM;
6433  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI_RM;
6434}
6435
6436} // Predicates = [HasVInstructions]
6437
6438//===----------------------------------------------------------------------===//
6439// 13. Vector Floating-Point Instructions
6440//===----------------------------------------------------------------------===//
6441
// Chapter 13 pseudos.  Common flags used throughout this scope:
//  * mayRaiseFPException — FP instructions may set fflags.
//  * _RM-suffixed multiclasses add a rounding-mode operand to the pseudo;
//    hasPostISelHook = 1 lets the target's post-instruction-selection hook
//    process that operand (NOTE(review): presumably expanding FRM
//    save/restore — confirm in RISCVISelLowering).
let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasPostISelHook = 1 in {
defm PseudoVFADD  : VPseudoVALU_VV_VF_RM;
defm PseudoVFSUB  : VPseudoVALU_VV_VF_RM;
// vfrsub only exists in scalar-operand (.vf) form; vv would just be vfsub.
defm PseudoVFRSUB : VPseudoVALU_VF_RM;
}

//===----------------------------------------------------------------------===//
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
// Each op has both narrow-source (VV/VF) and wide-first-source (WV/WF) forms.
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFWADD : VPseudoVFWALU_VV_VF_RM;
defm PseudoVFWSUB : VPseudoVFWALU_VV_VF_RM;
defm PseudoVFWADD : VPseudoVFWALU_WV_WF_RM;
defm PseudoVFWSUB : VPseudoVFWALU_WV_WF_RM;
}

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFMUL  : VPseudoVFMUL_VV_VF_RM;
defm PseudoVFDIV  : VPseudoVFDIV_VV_VF_RM;
defm PseudoVFRDIV : VPseudoVFRDIV_VF_RM;
}

//===----------------------------------------------------------------------===//
// 13.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0 in {
defm PseudoVFWMUL : VPseudoVWMUL_VV_VF_RM;
}

//===----------------------------------------------------------------------===//
// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFMACC  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMACC : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFMSAC  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMSAC : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFMADD  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMADD : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFMSUB  : VPseudoVMAC_VV_VF_AAXA_RM;
defm PseudoVFNMSUB : VPseudoVMAC_VV_VF_AAXA_RM;
}

//===----------------------------------------------------------------------===//
// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFWMACC  : VPseudoVWMAC_VV_VF_RM;
defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF_RM;
defm PseudoVFWMSAC  : VPseudoVWMAC_VV_VF_RM;
defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF_RM;
// bf16 widening FMA requires the Zvfbfwma extension, a tighter predicate
// than the enclosing HasVInstructionsAnyF.
let Predicates = [HasStdExtZvfbfwma] in
defm PseudoVFWMACCBF16  : VPseudoVWMAC_VV_VF_BF_RM;
}

//===----------------------------------------------------------------------===//
// 13.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0 in
defm PseudoVFSQRT : VPseudoVSQR_V_RM;

//===----------------------------------------------------------------------===//
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
// Estimate instruction: no rounding-mode operand (no _RM).
let mayRaiseFPException = true in
defm PseudoVFRSQRT7 : VPseudoVRCP_V;

//===----------------------------------------------------------------------===//
// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true, hasSideEffects = 0 in
defm PseudoVFREC7 : VPseudoVRCP_V_RM;

//===----------------------------------------------------------------------===//
// 13.11. Vector Floating-Point Min/Max Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
defm PseudoVFMIN : VPseudoVMAX_VV_VF;
defm PseudoVFMAX : VPseudoVMAX_VV_VF;
}

//===----------------------------------------------------------------------===//
// 13.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
// Sign injection only moves sign bits; note no mayRaiseFPException here.
defm PseudoVFSGNJ  : VPseudoVSGNJ_VV_VF;
defm PseudoVFSGNJN : VPseudoVSGNJ_VV_VF;
defm PseudoVFSGNJX : VPseudoVSGNJ_VV_VF;

//===----------------------------------------------------------------------===//
// 13.13. Vector Floating-Point Compare Instructions
//===----------------------------------------------------------------------===//
// vmflt/vmfle have no scalar-on-the-left forms; vmfgt/vmfge exist only as
// .vf (the .vv cases are handled by swapped-operand patterns elsewhere).
let mayRaiseFPException = true in {
defm PseudoVMFEQ : VPseudoVCMPM_VV_VF;
defm PseudoVMFNE : VPseudoVCMPM_VV_VF;
defm PseudoVMFLT : VPseudoVCMPM_VV_VF;
defm PseudoVMFLE : VPseudoVCMPM_VV_VF;
defm PseudoVMFGT : VPseudoVCMPM_VF;
defm PseudoVMFGE : VPseudoVCMPM_VF;
}

//===----------------------------------------------------------------------===//
// 13.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFCLASS : VPseudoVCLS_V;

//===----------------------------------------------------------------------===//
// 13.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMERGE : VPseudoVMRG_FM;

//===----------------------------------------------------------------------===//
// 13.16. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMV_V : VPseudoVMV_F;

//===----------------------------------------------------------------------===//
// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
// Three flavors of each conversion:
//  * plain       — dynamic rounding mode operand (_RM multiclass + hook);
//  * _RM-named   — rounding mode fixed statically in the pseudo;
//  * _RTZ-named  — round-toward-zero encoded in the instruction itself.
let mayRaiseFPException = true in {
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFCVT_XU_F : VPseudoVCVTI_V_RM;
defm PseudoVFCVT_X_F : VPseudoVCVTI_V_RM;
}

defm PseudoVFCVT_RM_XU_F : VPseudoVCVTI_RM_V;
defm PseudoVFCVT_RM_X_F : VPseudoVCVTI_RM_V;

defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;

defm PseudoVFROUND_NOEXCEPT : VPseudoVFROUND_NOEXCEPT_V;
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFCVT_F_XU : VPseudoVCVTF_V_RM;
defm PseudoVFCVT_F_X : VPseudoVCVTF_V_RM;
}
defm PseudoVFCVT_RM_F_XU : VPseudoVCVTF_RM_V;
defm PseudoVFCVT_RM_F_X  : VPseudoVCVTF_RM_V;
} // mayRaiseFPException = true

//===----------------------------------------------------------------------===//
// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFWCVT_XU_F     : VPseudoVWCVTI_V_RM;
defm PseudoVFWCVT_X_F      : VPseudoVWCVTI_V_RM;
}
defm PseudoVFWCVT_RM_XU_F  : VPseudoVWCVTI_RM_V;
defm PseudoVFWCVT_RM_X_F   : VPseudoVWCVTI_RM_V;

defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
defm PseudoVFWCVT_RTZ_X_F  : VPseudoVWCVTI_V;

// int->wider-float and float->wider-float are exact, so no rounding-mode
// operand is needed here.
defm PseudoVFWCVT_F_XU     : VPseudoVWCVTF_V;
defm PseudoVFWCVT_F_X      : VPseudoVWCVTF_V;

defm PseudoVFWCVT_F_F      : VPseudoVWCVTD_V;
defm PseudoVFWCVTBF16_F_F :  VPseudoVWCVTD_V;
} // mayRaiseFPException = true

//===----------------------------------------------------------------------===//
// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
let mayRaiseFPException = true in {
let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFNCVT_XU_F     : VPseudoVNCVTI_W_RM;
defm PseudoVFNCVT_X_F      : VPseudoVNCVTI_W_RM;
}
defm PseudoVFNCVT_RM_XU_F  : VPseudoVNCVTI_RM_W;
defm PseudoVFNCVT_RM_X_F   : VPseudoVNCVTI_RM_W;

defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
defm PseudoVFNCVT_RTZ_X_F  : VPseudoVNCVTI_W;

let hasSideEffects = 0, hasPostISelHook = 1 in {
defm PseudoVFNCVT_F_XU     : VPseudoVNCVTF_W_RM;
defm PseudoVFNCVT_F_X      : VPseudoVNCVTF_W_RM;
}
defm PseudoVFNCVT_RM_F_XU  : VPseudoVNCVTF_RM_W;
defm PseudoVFNCVT_RM_F_X   : VPseudoVNCVTF_RM_W;

let hasSideEffects = 0, hasPostISelHook = 1 in
defm PseudoVFNCVT_F_F      : VPseudoVNCVTD_W_RM;
defm PseudoVFNCVTBF16_F_F :  VPseudoVNCVTD_W_RM;

// _ROD: round-towards-odd narrowing conversion (fixed mode, no RM operand).
defm PseudoVFNCVT_ROD_F_F  : VPseudoVNCVTD_W;
} // mayRaiseFPException = true
} // Predicates = [HasVInstructionsAnyF]
6637
6638//===----------------------------------------------------------------------===//
6639// 14. Vector Reduction Operations
6640//===----------------------------------------------------------------------===//
6641
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 14.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// Reductions take a vector source and a scalar (element 0 of a vector
// register) accumulator, producing a scalar result in element 0 (_VS forms).
defm PseudoVREDSUM  : VPseudoVRED_VS;
defm PseudoVREDAND  : VPseudoVRED_VS;
defm PseudoVREDOR   : VPseudoVRED_VS;
defm PseudoVREDXOR  : VPseudoVRED_VS;
defm PseudoVREDMINU : VPseudoVREDMINMAX_VS;
defm PseudoVREDMIN  : VPseudoVREDMINMAX_VS;
defm PseudoVREDMAXU : VPseudoVREDMINMAX_VS;
defm PseudoVREDMAX  : VPseudoVREDMINMAX_VS;

//===----------------------------------------------------------------------===//
// 14.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// IsRVVWideningReduction marks these so later passes can identify widening
// reductions (destination element width is 2*SEW).
let IsRVVWideningReduction = 1 in {
defm PseudoVWREDSUMU   : VPseudoVWRED_VS;
defm PseudoVWREDSUM    : VPseudoVWRED_VS;
}
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
// 14.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
// vfredosum is the ordered (sequential) sum; vfredusum allows any
// association order.  Both take a rounding-mode operand (_RM).
let mayRaiseFPException = true,
    hasSideEffects = 0 in {
defm PseudoVFREDOSUM : VPseudoVFREDO_VS_RM;
defm PseudoVFREDUSUM : VPseudoVFRED_VS_RM;
}
let mayRaiseFPException = true in {
defm PseudoVFREDMIN  : VPseudoVFREDMINMAX_VS;
defm PseudoVFREDMAX  : VPseudoVFREDMINMAX_VS;
}

//===----------------------------------------------------------------------===//
// 14.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
let IsRVVWideningReduction = 1,
    hasSideEffects = 0,
    mayRaiseFPException = true in {
defm PseudoVFWREDUSUM  : VPseudoVFWRED_VS_RM;
defm PseudoVFWREDOSUM  : VPseudoVFWREDO_VS_RM;
}

} // Predicates = [HasVInstructionsAnyF]
6689
6690//===----------------------------------------------------------------------===//
6691// 15. Vector Mask Instructions
6692//===----------------------------------------------------------------------===//
6693
let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
// 15.1 Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//

// vmandn/vmorn complement only one operand, so they are not commutable.
defm PseudoVMAND: VPseudoVALU_MM<Commutable=1>;
defm PseudoVMNAND: VPseudoVALU_MM<Commutable=1>;
defm PseudoVMANDN: VPseudoVALU_MM;
defm PseudoVMXOR: VPseudoVALU_MM<Commutable=1>;
defm PseudoVMOR: VPseudoVALU_MM<Commutable=1>;
defm PseudoVMNOR: VPseudoVALU_MM<Commutable=1>;
defm PseudoVMORN: VPseudoVALU_MM;
defm PseudoVMXNOR: VPseudoVALU_MM<Commutable=1>;

// Nullary pseudos: vmclr.m/vmset.m are emitted via the named base
// instruction (vmxor.mm/vmxnor.mm) with the same register for both sources.
defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;

//===----------------------------------------------------------------------===//
// 15.2. Vector mask population count vcpop
//===----------------------------------------------------------------------===//
// IsSignExtendingOpW: the scalar result is already sign-extended to XLEN,
// letting RV64 sext.w elimination apply.
let IsSignExtendingOpW = 1 in
defm PseudoVCPOP: VPseudoVPOP_M;

//===----------------------------------------------------------------------===//
// 15.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//

let IsSignExtendingOpW = 1 in
defm PseudoVFIRST: VPseudoV1ST_M;

//===----------------------------------------------------------------------===//
// 15.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSBF: VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 15.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSIF: VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 15.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm PseudoVMSOF: VPseudoVSFS_M;

//===----------------------------------------------------------------------===//
// 15.8.  Vector Iota Instruction
//===----------------------------------------------------------------------===//
defm PseudoVIOTA_M: VPseudoVIOTA_M;

//===----------------------------------------------------------------------===//
// 15.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
// vid.v has no register sources, so it is safe to rematerialize instead of
// spilling.
let isReMaterializable = 1 in
defm PseudoVID : VPseudoVID_V;
} // Predicates = [HasVInstructions]
6751
6752//===----------------------------------------------------------------------===//
6753// 16. Vector Permutation Instructions
6754//===----------------------------------------------------------------------===//
6755
6756//===----------------------------------------------------------------------===//
6757// 16.1. Integer Scalar Move Instructions
6758//===----------------------------------------------------------------------===//
6759
let Predicates = [HasVInstructions] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  // vmv.x.s reads element 0 only, so the pseudo carries a SEW operand but
  // no VL operand (note: no AVL in the ins list).
  let HasSEWOp = 1, BaseInstr = VMV_X_S in
  def PseudoVMV_X_S:
    Pseudo<(outs GPR:$rd), (ins VR:$rs2, ixlenimm:$sew), []>,
    Sched<[WriteVMovXS, ReadVMovXS]>,
    RISCVVPseudo;
  // vmv.s.x writes only element 0; $rd is tied to the passthru $rs1 so the
  // remaining elements of the destination are preserved.
  let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
      Constraints = "$rd = $rs1" in
  def PseudoVMV_S_X: Pseudo<(outs VR:$rd),
                            (ins VR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),
                            []>,
    Sched<[WriteVMovSX, ReadVMovSX_V, ReadVMovSX_X]>,
    RISCVVPseudo;
}
} // Predicates = [HasVInstructions]
6776
6777//===----------------------------------------------------------------------===//
6778// 16.2. Floating-Point Scalar Move Instructions
6779//===----------------------------------------------------------------------===//
6780
let Predicates = [HasVInstructionsAnyF] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
  // Generate one pseudo per (FP register class, LMUL) pair, e.g.
  // PseudoVFMV_F64_S_M1.  Unlike the integer case these are plain defs in a
  // foreach rather than a multiclass, because the scalar side varies over
  // FPList as well as MxList.
  foreach f = FPList in {
    foreach m = f.MxList in {
      defvar mx = m.MX;
      let VLMul = m.value in {
        // vfmv.f.s reads element 0 only: SEW operand, no VL operand.
        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
        def "PseudoVFMV_" # f.FX # "_S_" # mx :
          Pseudo<(outs f.fprclass:$rd),
                 (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
          Sched<[WriteVMovFS, ReadVMovFS]>,
          RISCVVPseudo;
        // vfmv.s.f writes element 0; $rd tied to the passthru $rs1 to keep
        // the other destination elements.
        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
            Constraints = "$rd = $rs1" in
        def "PseudoVFMV_S_" # f.FX # "_" # mx :
                                          Pseudo<(outs m.vrclass:$rd),
                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
                                                      AVL:$vl, ixlenimm:$sew),
                                                 []>,
          Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>,
          RISCVVPseudo;
      }
    }
  }
}
} // Predicates = [HasVInstructionsAnyF]
6807
6808//===----------------------------------------------------------------------===//
6809// 16.3. Vector Slide Instructions
6810//===----------------------------------------------------------------------===//
// Up-slides may not have the destination overlap a source (elements move
// toward higher indices), hence the "@earlyclobber $rd" constraint on the
// *UP variants; down-slides have no such restriction.
let Predicates = [HasVInstructions] in {
  defm PseudoVSLIDEUP    : VPseudoVSLD_VX_VI</*slidesUp=*/true, "@earlyclobber $rd">;
  defm PseudoVSLIDEDOWN  : VPseudoVSLD_VX_VI</*slidesUp=*/false>;
  defm PseudoVSLIDE1UP   : VPseudoVSLD1_VX<"@earlyclobber $rd">;
  defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
  defm PseudoVFSLIDE1UP  : VPseudoVSLD1_VF<"@earlyclobber $rd">;
  defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// 16.4. Vector Register Gather Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
defm PseudoVRGATHER     : VPseudoVGTR_VV_VX_VI;
// vrgatherei16 always uses 16-bit indices regardless of SEW, so it needs its
// own EI16 multiclass with a distinct index-operand EMUL.
defm PseudoVRGATHEREI16 : VPseudoVGTR_EI16_VV;

//===----------------------------------------------------------------------===//
// 16.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
defm PseudoVCOMPRESS : VPseudoVCPR_V;
} // Predicates = [HasVInstructions]
6835
6836//===----------------------------------------------------------------------===//
6837// Patterns.
6838//===----------------------------------------------------------------------===//
6839
6840//===----------------------------------------------------------------------===//
6841// 11. Vector Integer Arithmetic Instructions
6842//===----------------------------------------------------------------------===//
6843
6844//===----------------------------------------------------------------------===//
6845// 11.1. Vector Single-Width Integer Add and Subtract
6846//===----------------------------------------------------------------------===//
6847defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
6848defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
6849defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
6850
6851//===----------------------------------------------------------------------===//
6852// 11.2. Vector Widening Integer Add/Subtract
6853//===----------------------------------------------------------------------===//
6854defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
6855defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
6856defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
6857defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
6858defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
6859defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
6860defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
6861defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;
6862
6863//===----------------------------------------------------------------------===//
6864// 11.3. Vector Integer Extension
6865//===----------------------------------------------------------------------===//
6866defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
6867                     AllFractionableVF2IntVectors>;
6868defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
6869                     AllFractionableVF4IntVectors>;
6870defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
6871                     AllFractionableVF8IntVectors>;
6872defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
6873                     AllFractionableVF2IntVectors>;
6874defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
6875                     AllFractionableVF4IntVectors>;
6876defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
6877                     AllFractionableVF8IntVectors>;
6878
6879//===----------------------------------------------------------------------===//
6880// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
6881//===----------------------------------------------------------------------===//
6882defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
6883defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
6884defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;
6885
6886defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
6887defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
6888defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
6889
6890//===----------------------------------------------------------------------===//
6891// 11.5. Vector Bitwise Logical Instructions
6892//===----------------------------------------------------------------------===//
6893defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
6894defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
6895defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;
6896
6897//===----------------------------------------------------------------------===//
6898// 11.6. Vector Single-Width Bit Shift Instructions
6899//===----------------------------------------------------------------------===//
6900defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
6901                            uimm5>;
6902defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
6903                            uimm5>;
6904defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
6905                            uimm5>;
6906
6907foreach vti = AllIntegerVectors in {
6908  // Emit shift by 1 as an add since it might be faster.
6909  let Predicates = GetVTypePredicates<vti>.Predicates in {
6910    def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$merge),
6911                                          (vti.Vector vti.RegClass:$rs1),
6912                                          (XLenVT 1), VLOpFrag)),
6913              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX)
6914                 vti.RegClass:$merge, vti.RegClass:$rs1,
6915                 vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
6916    def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
6917                                               (vti.Vector vti.RegClass:$rs1),
6918                                               (XLenVT 1),
6919                                               (vti.Mask V0),
6920                                               VLOpFrag,
6921                                               (XLenVT timm:$policy))),
6922              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
6923                                                          vti.RegClass:$merge,
6924                                                          vti.RegClass:$rs1,
6925                                                          vti.RegClass:$rs1,
6926                                                          (vti.Mask V0),
6927                                                          GPR:$vl,
6928                                                          vti.Log2SEW,
6929                                                          (XLenVT timm:$policy))>;
6930  }
6931}
6932
6933//===----------------------------------------------------------------------===//
6934// 11.7. Vector Narrowing Integer Right Shift Instructions
6935//===----------------------------------------------------------------------===//
6936defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
6937defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
6938
6939//===----------------------------------------------------------------------===//
6940// 11.8. Vector Integer Comparison Instructions
6941//===----------------------------------------------------------------------===//
6942defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
6943defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
6944defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
6945defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
6946defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
6947defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;
6948
6949defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
6950defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
6951
6952// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
6953defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
6954defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
6955
6956defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
6957defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
6958
6959// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16 and
6960// non-zero. Zero can be .vx with x0. This avoids the user needing to know that
6961// there is no vmslt(u).vi instruction. Similar for vmsge(u).vx intrinsics
6962// using vmslt(u).vi.
6963defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE", simm5_plus1_nonzero>;
6964defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
6965
6966// We need to handle 0 for vmsge.vi using vmslt.vi because there is no vmsge.vx.
6967defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT", simm5_plus1>;
6968defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
6969
6970//===----------------------------------------------------------------------===//
6971// 11.9. Vector Integer Min/Max Instructions
6972//===----------------------------------------------------------------------===//
6973defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
6974defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
6975defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
6976defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
6977
6978//===----------------------------------------------------------------------===//
6979// 11.10. Vector Single-Width Integer Multiply Instructions
6980//===----------------------------------------------------------------------===//
6981defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
6982
6983defvar IntegerVectorsExceptI64 = !filter(vti, AllIntegerVectors,
6984                                         !ne(vti.SEW, 64));
6985defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
6986                         IntegerVectorsExceptI64>;
6987defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
6988                         IntegerVectorsExceptI64>;
6989defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
6990                         IntegerVectorsExceptI64>;
6991
6992// vmulh, vmulhu, vmulhsu are not included for EEW=64 in Zve64*.
6993defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
6994let Predicates = [HasVInstructionsFullMultiply] in {
6995  defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
6996                           I64IntegerVectors>;
6997  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
6998                           I64IntegerVectors>;
6999  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
7000                           I64IntegerVectors>;
7001}
7002
7003//===----------------------------------------------------------------------===//
7004// 11.11. Vector Integer Divide Instructions
7005//===----------------------------------------------------------------------===//
7006defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors, isSEWAware=1>;
7007defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors, isSEWAware=1>;
7008defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors, isSEWAware=1>;
7009defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors, isSEWAware=1>;
7010
7011//===----------------------------------------------------------------------===//
7012// 11.12. Vector Widening Integer Multiply Instructions
7013//===----------------------------------------------------------------------===//
7014defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
7015defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
7016defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
7017
7018//===----------------------------------------------------------------------===//
7019// 11.13. Vector Single-Width Integer Multiply-Add Instructions
7020//===----------------------------------------------------------------------===//
7021defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
7022defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
7023defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
7024defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
7025
7026//===----------------------------------------------------------------------===//
7027// 11.14. Vector Widening Integer Multiply-Add Instructions
7028//===----------------------------------------------------------------------===//
7029defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
7030defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
7031defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
7032defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
7033
7034//===----------------------------------------------------------------------===//
7035// 11.15. Vector Integer Merge Instructions
7036//===----------------------------------------------------------------------===//
7037defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
7038
7039//===----------------------------------------------------------------------===//
7040// 11.16. Vector Integer Move Instructions
7041//===----------------------------------------------------------------------===//
7042foreach vti = AllVectors in {
7043  let Predicates = GetVTypePredicates<vti>.Predicates in {
7044    def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
7045                                             (vti.Vector vti.RegClass:$rs1),
7046                                             VLOpFrag)),
7047              (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
7048               $passthru, $rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
7049
7050    // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
7051  }
7052}
7053
7054//===----------------------------------------------------------------------===//
7055// 12. Vector Fixed-Point Arithmetic Instructions
7056//===----------------------------------------------------------------------===//
7057
7058//===----------------------------------------------------------------------===//
7059// 12.1. Vector Single-Width Saturating Add and Subtract
7060//===----------------------------------------------------------------------===//
7061defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
7062defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
7063defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
7064defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
7065
7066//===----------------------------------------------------------------------===//
7067// 12.2. Vector Single-Width Averaging Add and Subtract
7068//===----------------------------------------------------------------------===//
7069defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaaddu", "PseudoVAADDU",
7070                            AllIntegerVectors>;
7071defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasubu", "PseudoVASUBU",
7072                            AllIntegerVectors>;
7073defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasub", "PseudoVASUB",
7074                            AllIntegerVectors>;
7075defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaadd", "PseudoVAADD",
7076                            AllIntegerVectors>;
7077
7078//===----------------------------------------------------------------------===//
7079// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
7080//===----------------------------------------------------------------------===//
7081defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7082                             IntegerVectorsExceptI64>;
7083// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
7084let Predicates = [HasVInstructionsFullMultiply] in
7085defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7086                             I64IntegerVectors>;
7087
7088//===----------------------------------------------------------------------===//
7089// 12.4. Vector Single-Width Scaling Shift Instructions
7090//===----------------------------------------------------------------------===//
7091defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssrl", "PseudoVSSRL",
7092                               AllIntegerVectors, uimm5>;
7093defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssra", "PseudoVSSRA",
7094                               AllIntegerVectors, uimm5>;
7095
7096//===----------------------------------------------------------------------===//
7097// 12.5. Vector Narrowing Fixed-Point Clip Instructions
7098//===----------------------------------------------------------------------===//
7099defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclipu", "PseudoVNCLIPU",
7100                               AllWidenableIntVectors>;
7101defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP",
7102                               AllWidenableIntVectors>;
7103
7104//===----------------------------------------------------------------------===//
7105// 13. Vector Floating-Point Instructions
7106//===----------------------------------------------------------------------===//
7107
7108//===----------------------------------------------------------------------===//
7109// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
7110//===----------------------------------------------------------------------===//
7111defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors,
7112                            isSEWAware = 1>;
7113defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors,
7114                            isSEWAware = 1>;
7115defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors,
7116                         isSEWAware = 1>;
7117
7118//===----------------------------------------------------------------------===//
7119// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
7120//===----------------------------------------------------------------------===//
7121defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD",
7122                            AllWidenableFloatVectors, isSEWAware=1>;
7123defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB",
7124                            AllWidenableFloatVectors, isSEWAware=1>;
7125defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD",
7126                            AllWidenableFloatVectors, isSEWAware=1>;
7127defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB",
7128                            AllWidenableFloatVectors, isSEWAware=1>;
7129
7130//===----------------------------------------------------------------------===//
7131// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
7132//===----------------------------------------------------------------------===//
7133defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL",
7134                            AllFloatVectors, isSEWAware=1>;
7135defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfdiv", "PseudoVFDIV",
7136                            AllFloatVectors, isSEWAware=1>;
7137defm : VPatBinaryV_VX_RM<"int_riscv_vfrdiv", "PseudoVFRDIV",
7138                         AllFloatVectors, isSEWAware=1>;
7139
7140//===----------------------------------------------------------------------===//
7141// 13.5. Vector Widening Floating-Point Multiply
7142//===----------------------------------------------------------------------===//
7143defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL",
7144                            AllWidenableFloatVectors, isSEWAware=1>;
7145
7146//===----------------------------------------------------------------------===//
7147// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
7148//===----------------------------------------------------------------------===//
7149defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC",
7150                                  AllFloatVectors, isSEWAware=1>;
7151defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC",
7152                                  AllFloatVectors, isSEWAware=1>;
7153defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC",
7154                                  AllFloatVectors, isSEWAware=1>;
7155defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC",
7156                                  AllFloatVectors, isSEWAware=1>;
7157defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD",
7158                                  AllFloatVectors, isSEWAware=1>;
7159defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD",
7160                                  AllFloatVectors, isSEWAware=1>;
7161defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB",
7162                                  AllFloatVectors, isSEWAware=1>;
7163defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB",
7164                                  AllFloatVectors, isSEWAware=1>;
7165
7166//===----------------------------------------------------------------------===//
7167// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
7168//===----------------------------------------------------------------------===//
7169defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC",
7170                             AllWidenableFloatVectors, isSEWAware=1>;
7171defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC",
7172                             AllWidenableFloatVectors, isSEWAware=1>;
7173defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC",
7174                             AllWidenableFloatVectors, isSEWAware=1>;
7175defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC",
7176                             AllWidenableFloatVectors, isSEWAware=1>;
7177let Predicates = [HasStdExtZvfbfwma] in
7178defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmaccbf16", "PseudoVFWMACCBF16",
7179                              AllWidenableBFloatToFloatVectors, isSEWAware=1>;
7180
7181//===----------------------------------------------------------------------===//
7182// 13.8. Vector Floating-Point Square-Root Instruction
7183//===----------------------------------------------------------------------===//
7184defm : VPatUnaryV_V_RM<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors, isSEWAware=1>;
7185
7186//===----------------------------------------------------------------------===//
7187// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
7188//===----------------------------------------------------------------------===//
7189defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors, isSEWAware=1>;
7190
7191//===----------------------------------------------------------------------===//
7192// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
7193//===----------------------------------------------------------------------===//
7194defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors, isSEWAware=1>;
7195
7196//===----------------------------------------------------------------------===//
7197// 13.11. Vector Floating-Point Min/Max Instructions
7198//===----------------------------------------------------------------------===//
7199defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors,
7200                         isSEWAware=1>;
7201defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors,
7202                         isSEWAware=1>;
7203
7204//===----------------------------------------------------------------------===//
7205// 13.12. Vector Floating-Point Sign-Injection Instructions
7206//===----------------------------------------------------------------------===//
7207defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors,
7208                         isSEWAware=1>;
7209defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors,
7210                         isSEWAware=1>;
7211defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors,
7212                         isSEWAware=1>;
7213
7214//===----------------------------------------------------------------------===//
7215// 13.13. Vector Floating-Point Compare Instructions
7216//===----------------------------------------------------------------------===//
7217defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
7218defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
7219defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
7220defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
7221defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
7222defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
7223defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
7224defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;
7225
7226//===----------------------------------------------------------------------===//
7227// 13.14. Vector Floating-Point Classify Instruction
7228//===----------------------------------------------------------------------===//
7229defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
7230
7231//===----------------------------------------------------------------------===//
7232// 13.15. Vector Floating-Point Merge Instruction
7233//===----------------------------------------------------------------------===//
7234// We can use vmerge.vvm to support vector-vector vfmerge.
7235// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
7236// int_riscv_vmerge. Support both for compatibility.
7237foreach vti = AllFloatVectors in {
7238  let Predicates = GetVTypePredicates<vti>.Predicates in {
7239    defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
7240                                 vti.Vector,
7241                                 vti.Vector, vti.Vector, vti.Mask,
7242                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7243                                 vti.RegClass, vti.RegClass>;
7244    defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVMERGE", "VVM",
7245                                 vti.Vector,
7246                                 vti.Vector, vti.Vector, vti.Mask,
7247                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7248                                 vti.RegClass, vti.RegClass>;
7249    defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
7250                                 "V"#vti.ScalarSuffix#"M",
7251                                 vti.Vector,
7252                                 vti.Vector, vti.Scalar, vti.Mask,
7253                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7254                                 vti.RegClass, vti.ScalarRegClass>;
7255  }
7256}
7257
7258foreach fvti = AllFloatVectors in {
7259  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
7260  let Predicates = GetVTypePredicates<fvti>.Predicates in
7261  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
7262                                            (fvti.Vector fvti.RegClass:$rs2),
7263                                            (fvti.Scalar (fpimm0)),
7264                                            (fvti.Mask V0), VLOpFrag)),
7265            (instr fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
7266                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
7267}
7268
7269//===----------------------------------------------------------------------===//
7270// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
7271//===----------------------------------------------------------------------===//
7272defm : VPatConversionVI_VF_RTZ<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_RTZ_X_F">;
7273defm : VPatConversionVI_VF_RTZ<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
7274defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
7275defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
7276defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
7277defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
7278defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X",
7279                              isSEWAware=1>;
7280defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU",
7281                              isSEWAware=1>;
7282
7283//===----------------------------------------------------------------------===//
7284// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
7285//===----------------------------------------------------------------------===//
7286defm : VPatConversionWI_VF_RTZ<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
7287defm : VPatConversionWI_VF_RTZ<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
7288defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
7289defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
7290defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
7291defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
7292defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU",
7293                           isSEWAware=1>;
7294defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X",
7295                           isSEWAware=1>;
7296defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F",
7297                           isSEWAware=1>;
7298defm : VPatConversionWF_VF_BF<"int_riscv_vfwcvtbf16_f_f_v",
7299                              "PseudoVFWCVTBF16_F_F", isSEWAware=1>;
7300
7301//===----------------------------------------------------------------------===//
7302// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
7303//===----------------------------------------------------------------------===//
7304defm : VPatConversionVI_WF_RTZ<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
7305defm : VPatConversionVI_WF_RTZ<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
7306defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
7307defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
7308defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
7309defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
7310defm : VPatConversionVF_WI_RM<"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU",
7311                              isSEWAware=1>;
7312defm : VPatConversionVF_WI_RM<"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X",
7313                              isSEWAware=1>;
7314defvar WidenableFloatVectorsExceptF16 = !filter(fvtiToFWti, AllWidenableFloatVectors,
7315                                                !ne(fvtiToFWti.Vti.Scalar, f16));
7316defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
7317                           WidenableFloatVectorsExceptF16, isSEWAware=1>;
7318// Define vfncvt.f.f.w for f16 when Zvfhmin is enable.
7319defvar F16WidenableFloatVectors = !filter(fvtiToFWti, AllWidenableFloatVectors,
7320                                          !eq(fvtiToFWti.Vti.Scalar, f16));
7321let Predicates = [HasVInstructionsF16Minimal] in
7322defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
7323                           F16WidenableFloatVectors, isSEWAware=1>;
7324defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w",
7325                                 "PseudoVFNCVTBF16_F_F", isSEWAware=1>;
7326defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F",
7327                           isSEWAware=1>;
7328
7329//===----------------------------------------------------------------------===//
7330// 14. Vector Reduction Operations
7331//===----------------------------------------------------------------------===//
7332
7333//===----------------------------------------------------------------------===//
7334// 14.1. Vector Single-Width Integer Reduction Instructions
7335//===----------------------------------------------------------------------===//
7336defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
7337defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
7338defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
7339defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
7340defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
7341defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
7342defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
7343defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;
7344
7345//===----------------------------------------------------------------------===//
7346// 14.2. Vector Widening Integer Reduction Instructions
7347//===----------------------------------------------------------------------===//
7348defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
7349defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
7350
7351//===----------------------------------------------------------------------===//
7352// 14.3. Vector Single-Width Floating-Point Reduction Instructions
7353//===----------------------------------------------------------------------===//
7354defm : VPatReductionV_VS_RM<"int_riscv_vfredosum", "PseudoVFREDOSUM", IsFloat=1>;
7355defm : VPatReductionV_VS_RM<"int_riscv_vfredusum", "PseudoVFREDUSUM", IsFloat=1>;
7356defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", IsFloat=1>;
7357defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", IsFloat=1>;
7358
7359//===----------------------------------------------------------------------===//
7360// 14.4. Vector Widening Floating-Point Reduction Instructions
7361//===----------------------------------------------------------------------===//
7362defm : VPatReductionW_VS_RM<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", IsFloat=1>;
7363defm : VPatReductionW_VS_RM<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", IsFloat=1>;
7364
7365//===----------------------------------------------------------------------===//
7366// 15. Vector Mask Instructions
7367//===----------------------------------------------------------------------===//
7368
7369//===----------------------------------------------------------------------===//
7370// 15.1 Vector Mask-Register Logical Instructions
7371//===----------------------------------------------------------------------===//
7372defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
7373defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
7374defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
7375defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
7376defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
7377defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
7378defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
7379defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
7380
7381// pseudo instructions
7382defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
7383defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
7384
7385//===----------------------------------------------------------------------===//
7386// 15.2. Vector count population in mask vcpop.m
7387//===----------------------------------------------------------------------===//
7388defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;
7389
7390//===----------------------------------------------------------------------===//
7391// 15.3. vfirst find-first-set mask bit
7392//===----------------------------------------------------------------------===//
7393defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;
7394
7395//===----------------------------------------------------------------------===//
7396// 15.4. vmsbf.m set-before-first mask bit
7397//===----------------------------------------------------------------------===//
7398defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;
7399
7400//===----------------------------------------------------------------------===//
7401// 15.5. vmsif.m set-including-first mask bit
7402//===----------------------------------------------------------------------===//
7403defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;
7404
7405//===----------------------------------------------------------------------===//
7406// 15.6. vmsof.m set-only-first mask bit
7407//===----------------------------------------------------------------------===//
7408defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;
7409
7410//===----------------------------------------------------------------------===//
7411// 15.8.  Vector Iota Instruction
7412//===----------------------------------------------------------------------===//
7413defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;
7414
7415//===----------------------------------------------------------------------===//
7416// 15.9. Vector Element Index Instruction
7417//===----------------------------------------------------------------------===//
7418defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
7419
7420
7421//===----------------------------------------------------------------------===//
7422// 16. Vector Permutation Instructions
7423//===----------------------------------------------------------------------===//
7424
7425//===----------------------------------------------------------------------===//
7426// 16.1. Integer Scalar Move Instructions
7427//===----------------------------------------------------------------------===//
7428
7429foreach vti = NoGroupIntegerVectors in {
7430  let Predicates = GetVTypePredicates<vti>.Predicates in
7431  def : Pat<(XLenVT (riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2))),
7432            (PseudoVMV_X_S $rs2, vti.Log2SEW)>;
7433  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
7434}
7435
7436//===----------------------------------------------------------------------===//
7437// 16.3. Vector Slide Instructions
7438//===----------------------------------------------------------------------===//
7439defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
7440defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
7441defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
7442defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
7443
7444defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
7445defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
7446defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
7447defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
7448
7449//===----------------------------------------------------------------------===//
7450// 16.4. Vector Register Gather Instructions
7451//===----------------------------------------------------------------------===//
7452defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7453                                AllIntegerVectors, uimm5>;
7454defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7455                              eew=16, vtilist=AllIntegerVectors>;
7456
7457defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7458                                AllFloatVectors, uimm5>;
7459defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7460                              eew=16, vtilist=AllFloatVectors>;
7461//===----------------------------------------------------------------------===//
7462// 16.5. Vector Compress Instruction
7463//===----------------------------------------------------------------------===//
7464defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
7465defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
7466
7467// Include the non-intrinsic ISel patterns
7468include "RISCVInstrInfoVVLPatterns.td"
7469include "RISCVInstrInfoVSDPatterns.td"
7470