xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td (revision 7ef62cebc2f965b0f640263e179276928885e33d)
1//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file describes the RISC-V instructions from the standard 'V' Vector
10/// extension, version 1.0.
11///
12//===----------------------------------------------------------------------===//
13
14include "RISCVInstrFormatsV.td"
15
16//===----------------------------------------------------------------------===//
17// Operand and SDNode transformation definitions.
18//===----------------------------------------------------------------------===//
19
// AsmOperandClass for a `vtypei` immediate operand holding VTypeINum bits.
// Parser, renderer, and diagnostic are shared across widths; only the class
// name is parameterized so each width gets a distinct match class.
class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
  let Name = "VTypeI" # VTypeINum;
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
  let RenderMethod = "addVTypeIOperands";
}
26
// MC-layer operand for a `vtypei` immediate of VTypeINum bits. Accepts either
// a constant that fits in VTypeINum unsigned bits or a bare symbol reference
// (resolved later, e.g. by the assembler).
class VTypeIOp<int VTypeINum> : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
  let OperandType = "OPERAND_VTYPEI" # VTypeINum;
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isUInt<VTypeINum>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}
40
// Concrete vtypei operands of 10 and 11 immediate bits.
// NOTE(review): presumably the two widths correspond to the vsetvli immediate
// field sizes for different XLEN/instruction forms — confirm against users.
def VTypeIOp10 : VTypeIOp<10>;
def VTypeIOp11 : VTypeIOp<11>;
43
// Assembly match class for the optional trailing mask operand (`, v0.t`).
// IsOptional lets the parser omit it; DefaultMethod supplies the unmasked
// default operand when absent. PredicateMethod restricts the register to v0.
def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}
53
// Register operand for the mask register (VMV0 class, i.e. only v0), with
// custom print/encode/decode hooks for the `$vm` field.
def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}
60
// 5-bit signed immediate operand ([-16, 15]); also usable as an ISel ImmLeaf.
// The MC predicate additionally accepts a bare symbol reference.
def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let OperandType = "OPERAND_SIMM5";
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}
74
// Assembly match class for the "simm5 plus 1" immediate form below.
def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}
80
// Immediate in [-15, 16]: the simm5 range shifted up by one (excludes -16,
// includes 16). Used where the encoded value is the printed value minus one.
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let OperandType = "OPERAND_SIMM5_PLUS1";
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}
93
// ISel-only variant of simm5_plus1 that also excludes zero: [-15, 16] \ {0}.
def simm5_plus1_nonzero : ImmLeaf<XLenVT,
  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
96
97//===----------------------------------------------------------------------===//
98// Scheduling definitions.
99//===----------------------------------------------------------------------===//
100
// Scheduling helpers. Each class builds the Write/Read SchedReadWrite names
// by string concatenation (instruction kind, element width `n`, ordering `o`
// of "U"/"O" for unordered/ordered, and LMUL suffix `mx`) and casts them to
// the records defined in the scheduling model.

// Whole-register move of n registers (vmv<n>r.v).
class VMVRSched<int n> : Sched<[
  !cast<SchedReadWrite>("WriteVMov" #n #"V"),
  !cast<SchedReadWrite>("ReadVMov" #n #"V")
]>;

// Unit-stride load: writes vd, reads the base address register and mask.
class VLESched<string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLDE_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;

// Unit-stride store: reads store data, base address, and mask.
class VSESched<string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSTE_" #mx),
  !cast<SchedReadWrite>("ReadVSTEV_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx), ReadVMask
]>;

// Strided load of EEW n: reads base address and stride registers.
class VLSSched<int n, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLDS" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLDSX_" #mx), ReadVMask
]>;

// Strided store of EEW n: reads store data, base address, and stride.
class VSSSched<int n, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSTS" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTS" #n #"V_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVSTSX_" #mx), ReadVMask
]>;

// Indexed load of index EEW n, ordering o ("U" or "O"): reads base address
// and the index vector.
class VLXSched<int n, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLD" #o #"X" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLD" #o #"XV_" #mx), ReadVMask
]>;

// Indexed store of index EEW n, ordering o: reads store data, base address,
// and the index vector.
class VSXSched<int n, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVST" #o #"X" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"X" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"XV_" #mx), ReadVMask
]>;

// Unit-stride fault-only-first load.
class VLFSched<string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLDFF_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;
147
// Segment load/store scheduling helpers. Names are built from the number of
// fields `nf`, the element width `eew`, the ordering `o` ("U"/"O") for the
// indexed forms, and the LMUL suffix `mx`.

// Unit-Stride Segment Loads and Stores
class VLSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;
class VSSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTEV_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx), ReadVMask
]>;
// Fault-only-first variant of the unit-stride segment load.
class VLSEGFFSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;
// Strided Segment Loads and Stores
class VLSSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLDSX_" #mx), ReadVMask
]>;
class VSSSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTS" #eew #"V" #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVSTSX_" #mx), ReadVMask
]>;
// Indexed Segment Loads and Stores
class VLXSEGSched<int nf, int eew, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVL" #o #"XSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLD" #o #"XV" #"_" #mx), ReadVMask
]>;
class VSXSEGSched<int nf, int eew, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVS" #o #"XSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"X" #eew # "_" # mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"XV" # "_" # mx), ReadVMask
]>;
186
187//===----------------------------------------------------------------------===//
188// Instruction class templates
189//===----------------------------------------------------------------------===//
190
// Vector load instruction class templates. All are pure loads (no side
// effects, no stores). `width.Value{3}` supplies the MEW encoding bit and
// `width.Value{2-0}` the 3-bit width field — NOTE(review): inferred from the
// RVInstVLU/VLS/VLX field order; confirm against RISCVInstrFormatsV.td.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// Unmasked-only forms: vm is hard-wired to 1 and the usual register-group
// overlap constraint is dropped.
let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPRMem:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  // Clear implicit uses inherited from the base instruction class;
  // NOTE(review): presumably because whole-register loads do not depend on
  // vl/vtype — confirm.
  let Uses = [];
}

// unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1), opcodestr, "$vd, (${rs1})">;
} // vm = 1, RVVConstraint = NoConstraint

// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed load vd, (rs1), vs2, vm (mop selects ordered/unordered indexing)
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// Segment variants: identical to the classes above except the nf field is a
// template parameter instead of 0b000.

// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
261
// Vector store instruction class templates — mirrors of the load templates
// above: no outs, store data comes in via $vs3.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMem:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// Unmasked-only forms: vm hard-wired to 1.
let vm = 1 in {
// vs<nf>r.v vd, (rs1) — whole-register store; mew bit and width field are
// fixed to 0 here, unlike the parameterized classes.
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPRMem:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  // Clear implicit uses inherited from the base instruction class;
  // NOTE(review): presumably because whole-register stores do not depend on
  // vl/vtype — confirm.
  let Uses = [];
}

// unit-stride mask store vd, vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPRMem:$rs1), opcodestr,
                "$vs3, (${rs1})">;
} // vm = 1

// strided store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// indexed store vd, vs3, (rs1), vs2, vm (mop selects ordered/unordered)
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// Segment variants: same as above with the nf field parameterized.

// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMem:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vd, vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vd, vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
316
// Arithmetic instruction class templates. Naming convention: the trailing
// letters encode the operand kinds (VV = vector-vector, VX = vector-scalar,
// VI = vector-immediate, VF = vector-FP-scalar); an `m` infix means the
// masked encoding (vm=0) is used with v0 as an extra data input (carry/merge);
// an `r` infix means the assembly order of the two sources is reversed;
// `NoVm` means no mask operand is accepted (vm hard-wired to 1).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs1, vs2
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm, vm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (use vs1 as instruction encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
414
415//===----------------------------------------------------------------------===//
416// Combination of instruction classes.
417// Use these multiclasses to define instructions more easily.
418//===----------------------------------------------------------------------===//
419
// Defines the four indexed load/store instructions (unordered/ordered load,
// unordered/ordered store) for each EEW in EEWList, e.g. VLUXEI8_V .. for
// n = 8. Scheduling uses the UpperBoundLMUL suffix.
multiclass VIndexLoadStore<list<int> EEWList> {
  foreach n = EEWList in {
    defvar w = !cast<RISCVWidth>("LSWidth" # n);

    def VLUXEI # n # _V :
      VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
      VLXSched<n, "U", UpperBoundLMUL>;
    def VLOXEI # n # _V :
      VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
      VLXSched<n, "O", UpperBoundLMUL>;

    def VSUXEI # n # _V :
      VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
      VSXSched<n, "U", UpperBoundLMUL>;
    def VSOXEI # n # _V :
      VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
      VSXSched<n, "O", UpperBoundLMUL>;
  }
}
439
// Integer ALU multiclasses. The suffix of each multiclass name lists which
// operand forms are emitted (V = .vv, X = .vx, I = .vi); `vw` lets callers
// switch the mnemonic infix (e.g. "v" vs "w" for widening/narrowing ops).

// Emits <opcodestr>.<vw>v, .<vw>x and .<vw>i forms.
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIALUV_UpperBound, ReadVIALUV_UpperBound,
                  ReadVIALUV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX_UpperBound, ReadVIALUV_UpperBound,
                  ReadVIALUX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVIALUI_UpperBound, ReadVIALUV_UpperBound,
                  ReadVMask]>;
}

// Emits only the .<vw>v and .<vw>x forms.
multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIALUV_UpperBound, ReadVIALUV_UpperBound,
                  ReadVIALUV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX_UpperBound, ReadVIALUV_UpperBound,
                  ReadVIALUX_UpperBound, ReadVMask]>;
}

// Emits only the .<vw>x and .<vw>i forms (e.g. for reversed-operand ops
// whose .vv form does not exist).
multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUV_UpperBound, ReadVIALUV_UpperBound,
                  ReadVIALUX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVIALUI_UpperBound, ReadVIALUV_UpperBound,
                  ReadVMask]>;
}
469
// Widening integer ALU (OPMVV/OPMVX encodings, VIWALU sched resources).
multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIWALUV_UpperBound, ReadVIWALUV_UpperBound,
                  ReadVIWALUV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIWALUX_UpperBound, ReadVIWALUV_UpperBound,
                  ReadVIWALUX_UpperBound, ReadVMask]>;
}

// Integer multiply-add; uses the reversed-operand classes (VALUrVV/VALUrVX)
// because the multiplicand comes first in the assembly syntax.
multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIMulAddV_UpperBound, ReadVIMulAddV_UpperBound,
                 ReadVIMulAddV_UpperBound, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIMulAddX_UpperBound, ReadVIMulAddV_UpperBound,
                 ReadVIMulAddX_UpperBound, ReadVMask]>;
}

// Widening integer multiply-add, .vv and .vx forms.
multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWMulAddV_UpperBound, ReadVIWMulAddV_UpperBound,
                 ReadVIWMulAddV_UpperBound, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX_UpperBound, ReadVIWMulAddV_UpperBound,
                 ReadVIWMulAddX_UpperBound, ReadVMask]>;
}

// Widening integer multiply-add with only a .vx form.
multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX_UpperBound, ReadVIWMulAddV_UpperBound,
                 ReadVIWMulAddX_UpperBound, ReadVMask]>;
}

// Single-source op (vs1 field used as a sub-opcode), e.g. sign/zero extends.
multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVExtV_UpperBound, ReadVExtV_UpperBound, ReadVMask]>;
}
507
// Carry-in ops (.vvm/.vxm/.vim — v0 is an explicit extra source, vm=0).
multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUV_UpperBound, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUX_UpperBound, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVICALUI_UpperBound, ReadVICALUV_UpperBound,
                  ReadVMask]>;
}

// Merge ops — same instruction shapes as VALUm_IV_V_X_I but scheduled on the
// VIMerge resources.
multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVIMergeV_UpperBound, ReadVIMergeV_UpperBound,
                  ReadVIMergeV_UpperBound, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVIMergeX_UpperBound, ReadVIMergeV_UpperBound,
                  ReadVIMergeX_UpperBound, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVIMergeI_UpperBound, ReadVIMergeV_UpperBound,
                  ReadVMask]>;
}

// Carry-in ops without an immediate form (.vvm/.vxm only).
multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUV_UpperBound, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUX_UpperBound, ReadVMask]>;
}

// Unmasked-only ops (.vv/.vx/.vi, no $vm operand, no ReadVMask).
multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUV_UpperBound]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUX_UpperBound]>;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>,
          Sched<[WriteVICALUI_UpperBound, ReadVICALUV_UpperBound]>;
}

// Unmasked-only ops without an immediate form.
multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUV_UpperBound]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUX_UpperBound]>;
}
560
// Floating-point multiclasses (OPFVV/OPFVF encodings). Suffix convention:
// V = .<vw>v vector form, F = .<vw>f FP-scalar form. Each group differs only
// in the scheduling resources it binds to.

// FP add/sub-style ops, both forms.
multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFALUV_UpperBound, ReadVFALUV_UpperBound,
                 ReadVFALUV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF_UpperBound, ReadVFALUV_UpperBound,
                 ReadVFALUF_UpperBound, ReadVMask]>;
}

// FP op with only the scalar form (e.g. reverse-operand variants).
multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF_UpperBound, ReadVFALUV_UpperBound,
                 ReadVFALUF_UpperBound, ReadVMask]>;
}

// Widening FP add/sub.
multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWALUV_UpperBound, ReadVFWALUV_UpperBound,
                 ReadVFWALUV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWALUF_UpperBound, ReadVFWALUV_UpperBound,
                 ReadVFWALUF_UpperBound, ReadVMask]>;
}

// FP multiply.
multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulV_UpperBound, ReadVFMulV_UpperBound,
                 ReadVFMulV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulF_UpperBound, ReadVFMulV_UpperBound,
                 ReadVFMulF_UpperBound, ReadVMask]>;
}

// FP divide.
multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFDivV_UpperBound, ReadVFDivV_UpperBound,
                 ReadVFDivV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF_UpperBound, ReadVFDivV_UpperBound,
                 ReadVFDivF_UpperBound, ReadVMask]>;
}

// Reverse FP divide — scalar form only.
multiclass VRDIV_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF_UpperBound, ReadVFDivV_UpperBound,
                 ReadVFDivF_UpperBound, ReadVMask]>;
}

// Widening FP multiply.
multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulV_UpperBound, ReadVFWMulV_UpperBound,
                 ReadVFWMulV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulF_UpperBound, ReadVFWMulV_UpperBound,
                 ReadVFWMulF_UpperBound, ReadVMask]>;
}

// FP fused multiply-add; reversed-operand classes (multiplicand first).
multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulAddV_UpperBound, ReadVFMulAddV_UpperBound,
                 ReadVFMulAddV_UpperBound, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulAddF_UpperBound, ReadVFMulAddV_UpperBound,
                 ReadVFMulAddF_UpperBound, ReadVMask]>;
}

// Widening FP fused multiply-add.
multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulAddV_UpperBound, ReadVFWMulAddV_UpperBound,
                 ReadVFWMulAddV_UpperBound, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulAddF_UpperBound, ReadVFWMulAddV_UpperBound,
                 ReadVFWMulAddF_UpperBound, ReadVMask]>;
}
635
636multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
637  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
638           Sched<[WriteVFSqrtV_UpperBound, ReadVFSqrtV_UpperBound,
639                  ReadVMask]>;
640}
641
642multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
643  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
644           Sched<[WriteVFRecpV_UpperBound, ReadVFRecpV_UpperBound,
645                  ReadVMask]>;
646}
647
648multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
649  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
650          Sched<[WriteVFCmpV_UpperBound, ReadVFCmpV_UpperBound,
651                 ReadVFCmpV_UpperBound, ReadVMask]>;
652  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
653          Sched<[WriteVFCmpF_UpperBound, ReadVFCmpV_UpperBound,
654                 ReadVFCmpF_UpperBound, ReadVMask]>;
655}
656
657multiclass VCMP_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
658  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
659          Sched<[WriteVFCmpF_UpperBound, ReadVFCmpV_UpperBound,
660                 ReadVFCmpF_UpperBound, ReadVMask]>;
661}
662
663multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
664  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
665          Sched<[WriteVFSgnjV_UpperBound, ReadVFSgnjV_UpperBound,
666                 ReadVFSgnjV_UpperBound, ReadVMask]>;
667  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
668          Sched<[WriteVFSgnjF_UpperBound, ReadVFSgnjV_UpperBound,
669                 ReadVFSgnjF_UpperBound, ReadVMask]>;
670}
671
672multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
673  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
674           Sched<[WriteVFClassV_UpperBound, ReadVFClassV_UpperBound,
675                  ReadVMask]>;
676}
677
678multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
679  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
680           Sched<[WriteVFCvtIToFV_UpperBound, ReadVFCvtIToFV_UpperBound,
681                  ReadVMask]>;
682}
683
684multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
685  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
686           Sched<[WriteVFCvtFToIV_UpperBound, ReadVFCvtFToIV_UpperBound,
687                  ReadVMask]>;
688}
689
690multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
691  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
692           Sched<[WriteVFWCvtIToFV_UpperBound, ReadVFWCvtIToFV_UpperBound,
693                  ReadVMask]>;
694}
695
696multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
697  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
698           Sched<[WriteVFWCvtFToIV_UpperBound, ReadVFWCvtFToIV_UpperBound,
699                  ReadVMask]>;
700}
701
702multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
703  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
704           Sched<[WriteVFWCvtFToFV_UpperBound, ReadVFWCvtFToFV_UpperBound,
705                  ReadVMask]>;
706}
707
// Unary OPFVV op: narrowing integer-to-float conversion; UpperBound
// VFNCvtIToF scheduling resources plus a mask read.
multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtIToFV_UpperBound, ReadVFNCvtIToFV_UpperBound,
                  ReadVMask]>;
}
713
// Unary OPFVV op: narrowing float-to-integer conversion; UpperBound
// VFNCvtFToI scheduling resources plus a mask read.
multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToIV_UpperBound, ReadVFNCvtFToIV_UpperBound,
                  ReadVMask]>;
}
719
// Unary OPFVV op: narrowing float-to-float conversion; UpperBound
// VFNCvtFToF scheduling resources plus a mask read.
multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToFV_UpperBound, ReadVFNCvtFToFV_UpperBound,
                  ReadVMask]>;
}
725
// Single-width integer reduction (".vs" mnemonic suffix): reads a vector
// operand and a reduction source (ReadVIRedV0), writes the scalar result.
multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV0, ReadVMask]>;
}
730
// Widening integer reduction (".vs"); OPIVV encoding, VIWRed sched resources.
multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV0, ReadVMask]>;
}
735
// Single-width (unordered) floating-point reduction (".vs").
multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV0, ReadVMask]>;
}
740
// Ordered floating-point reduction (".vs"); distinct VFRedO sched resources.
multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV0, ReadVMask]>;
}
745
// Widening (unordered) floating-point reduction (".vs").
multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV0, ReadVMask]>;
}
750
// Widening ordered floating-point reduction (".vs").
multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedOV, ReadVFWRedOV, ReadVFWRedOV0, ReadVMask]>;
}
755
// Mask-register logical op ("<op>.mm"): uses the NoVm format, so there is no
// vm operand and no ReadVMask in the sched list.
multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr #"." #vm #"m">,
          Sched<[WriteVMALUV_UpperBound, ReadVMALUV_UpperBound,
                 ReadVMALUV_UpperBound]>;
}
761
// Unary mask set-first/set-before/set-including op (vs1 selects the variant).
multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMSFSV_UpperBound, ReadVMSFSV_UpperBound, ReadVMask]>;
}
766
// Unary mask iota/id op (vs1 selects the variant).
multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMIotV_UpperBound, ReadVMIotV_UpperBound, ReadVMask]>;
}
771
// Single-width shift: .vv, .vx and .vi forms (shift amount from a vector, a
// GPR, or an immediate of type `optype`, uimm5 for the shift instructions).
multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVShiftV_UpperBound, ReadVShiftV_UpperBound,
                  ReadVShiftV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVShiftX_UpperBound, ReadVShiftV_UpperBound,
                  ReadVShiftX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVShiftI_UpperBound, ReadVShiftV_UpperBound,
                  ReadVMask]>;
}
783
// Narrowing shift (".w*" mnemonics via vw="w"): .vv, .vx and .vi forms with
// VNShift scheduling resources.
multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVNShiftV_UpperBound, ReadVNShiftV_UpperBound,
                  ReadVNShiftV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVNShiftX_UpperBound, ReadVNShiftV_UpperBound,
                  ReadVNShiftX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVNShiftI_UpperBound, ReadVNShiftV_UpperBound,
                  ReadVMask]>;
}
795
// Integer compare producing a mask: .vv, .vx and .vi forms; each def uses the
// write resource matching its operand form (V/X/I).
multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVICmpV_UpperBound, ReadVICmpV_UpperBound,
                  ReadVICmpV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVICmpX_UpperBound, ReadVICmpV_UpperBound,
                  ReadVICmpX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVICmpI_UpperBound, ReadVICmpV_UpperBound,
                  ReadVMask]>;
}
807
// Integer compare with only .vx and .vi forms (used for vmsgtu/vmsgt, which
// have no .vv encoding).
multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  // Use the X-form write resource, matching the `def X` in every other
  // compare multiclass above/below; the previous WriteVICmpV_UpperBound was
  // an inconsistent copy/paste.
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVICmpX_UpperBound, ReadVICmpV_UpperBound,
                  ReadVICmpX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVICmpI_UpperBound, ReadVICmpV_UpperBound,
                  ReadVMask]>;
}
816
// Integer compare with only .vv and .vx forms (used for vmsltu/vmslt, which
// have no .vi encoding).
multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVICmpV_UpperBound, ReadVICmpV_UpperBound,
                  ReadVICmpV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVICmpX_UpperBound, ReadVICmpV_UpperBound,
                  ReadVICmpX_UpperBound, ReadVMask]>;
}
825
// Single-width integer multiply: .vv and .vx forms (OPMVV/OPMVX encodings).
multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIMulV_UpperBound, ReadVIMulV_UpperBound,
                  ReadVIMulV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIMulX_UpperBound, ReadVIMulV_UpperBound,
                  ReadVIMulX_UpperBound, ReadVMask]>;
}
834
// Widening integer multiply: .vv and .vx forms with VIWMul sched resources.
multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIWMulV_UpperBound, ReadVIWMulV_UpperBound,
                  ReadVIWMulV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIWMulX_UpperBound, ReadVIWMulV_UpperBound,
                  ReadVIWMulX_UpperBound, ReadVMask]>;
}
843
// Integer divide/remainder: .vv and .vx forms with VIDiv sched resources.
multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIDivV_UpperBound, ReadVIDivV_UpperBound,
                  ReadVIDivV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIDivX_UpperBound, ReadVIDivV_UpperBound,
                  ReadVIDivX_UpperBound, ReadVMask]>;
}
852
// Saturating add/subtract: .vv, .vx and .vi forms with VSALU sched resources.
multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSALUV_UpperBound, ReadVSALUV_UpperBound,
                  ReadVSALUV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSALUX_UpperBound, ReadVSALUV_UpperBound,
                  ReadVSALUX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVSALUI_UpperBound, ReadVSALUV_UpperBound,
                  ReadVMask]>;
}
864
// Saturating subtract: .vv and .vx forms only (no immediate encoding).
multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSALUV_UpperBound, ReadVSALUV_UpperBound,
                  ReadVSALUV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSALUX_UpperBound, ReadVSALUV_UpperBound,
                  ReadVSALUX_UpperBound, ReadVMask]>;
}
873
// Averaging add/subtract: .vv and .vx forms with VAALU sched resources.
multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVAALUV_UpperBound, ReadVAALUV_UpperBound,
                  ReadVAALUV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVAALUX_UpperBound, ReadVAALUV_UpperBound,
                  ReadVAALUX_UpperBound, ReadVMask]>;
}
882
// Fractional multiply with rounding/saturation: .vv and .vx forms.
multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSMulV_UpperBound, ReadVSMulV_UpperBound,
                  ReadVSMulV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSMulX_UpperBound, ReadVSMulV_UpperBound,
                  ReadVSMulX_UpperBound, ReadVMask]>;
}
891
// Scaling (rounding) shift: .vv, .vx and .vi forms with VSShift resources.
multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSShiftV_UpperBound, ReadVSShiftV_UpperBound,
                  ReadVSShiftV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSShiftX_UpperBound, ReadVSShiftV_UpperBound,
                  ReadVSShiftX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVSShiftI_UpperBound, ReadVSShiftV_UpperBound,
                  ReadVMask]>;
}
903
// Narrowing fixed-point clip: .vv, .vx and .vi forms with VNClip resources.
multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVNClipV_UpperBound, ReadVNClipV_UpperBound,
                  ReadVNClipV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVNClipX_UpperBound, ReadVNClipV_UpperBound,
                  ReadVNClipX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVNClipI_UpperBound, ReadVNClipV_UpperBound,
                  ReadVMask]>;
}
915
// Slide up/down: .vx and .vi forms only (offset from a GPR or immediate).
multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVISlideX_UpperBound, ReadVISlideV_UpperBound,
                  ReadVISlideX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVISlideI_UpperBound, ReadVISlideV_UpperBound,
                  ReadVMask]>;
}
924
// slide1up/slide1down with a GPR-sourced element: .vx form only.
multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVISlide1X_UpperBound, ReadVISlideV_UpperBound,
                  ReadVISlideX_UpperBound, ReadVMask]>;
}
930
// fslide1up/fslide1down with an FPR-sourced element: .vf form only.
multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSlide1F_UpperBound, ReadVFSlideV_UpperBound,
                 ReadVFSlideF_UpperBound, ReadVMask]>;
}
936
// Register gather: .vv, .vx and .vi forms with VGather sched resources.
multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVGatherV_UpperBound, ReadVGatherV_UpperBound,
                  ReadVGatherV_UpperBound, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVGatherX_UpperBound, ReadVGatherV_UpperBound,
                  ReadVGatherX_UpperBound, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVGatherI_UpperBound, ReadVGatherV_UpperBound,
                  ReadVMask]>;
}
948
// Compress ("<op>.vm"): NoVm format, so no vm operand and no ReadVMask.
multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M  : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
           Sched<[WriteVCompressV_UpperBound, ReadVCompressV_UpperBound,
                  ReadVCompressV_UpperBound]>;
}
954
// Whole-register loads vl<nf+1>re{8,16,32}.v: one def per element width l.
// The SchedWrite class WriteVLD<nf+1>R is looked up by name with !cast.
multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
  foreach l = [8, 16, 32] in {
    defvar w = !cast<RISCVWidth>("LSWidth" # l);
    defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R");

    def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                     Sched<[s, ReadVLDX_UpperBound]>;
  }
}
// EEW=64 whole-register load variant; the SchedWrite is passed explicitly
// rather than looked up by name as in VWholeLoadN.
multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
              Sched<[schedrw, ReadVLDX_UpperBound]>;
}
968
969//===----------------------------------------------------------------------===//
970// Instructions
971//===----------------------------------------------------------------------===//
972
let Predicates = [HasVInstructions] in {
// Configuration-setting instructions. Modeled with hasSideEffects = 1 since
// they update processor vector state (they write vtype/vl — TODO(review):
// confirm that is the intended reason for the flag here).
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">,
                           Sched<[WriteVSETVLI, ReadVSETVLI]>;
def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">,
                             Sched<[WriteVSETIVLI]>;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">,
                          Sched<[WriteVSETVL, ReadVSETVL, ReadVSETVL]>;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
// Loads/stores for EEW 8/16/32; the EEW=64 variants live in the
// HasVInstructionsI64 region below.
foreach eew = [8, 16, 32] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched<UpperBoundLMUL>;
  def VSE#eew#_V  : VUnitStrideStore<w,  "vse"#eew#".v">, VSESched<UpperBoundLMUL>;

  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w,  "vle"#eew#"ff.v">, VLFSched<UpperBoundLMUL>;

  // Vector Strided Instructions
  def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew, UpperBoundLMUL>;
  def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew, UpperBoundLMUL>;
}

defm "" : VIndexLoadStore<[8, 16, 32]>;
} // Predicates = [HasVInstructions]
1003
let Predicates = [HasVInstructions] in {
// Mask load/store (always EEW=1).
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
             Sched<[WriteVLDM_UpperBound, ReadVLDX_UpperBound]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
             Sched<[WriteVSTM_UpperBound, ReadVSTM_UpperBound, ReadVSTX_UpperBound]>;
// Pre-1.0 mnemonics vle1.v/vse1.v; the trailing 0 marks these aliases as
// accepted by the parser but never used for printing.
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;

// Whole-register loads for EEW 8/16/32 (nf field holds register count - 1).
defm VL1R : VWholeLoadN<0, "vl1r", VR>;
defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;

def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX_UpperBound]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX_UpperBound]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX_UpperBound]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX_UpperBound]>;

// vl<n>r.v without an explicit EEW maps to the EEW=8 encoding.
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]
1033
// EEW=64 loads/stores; these mirror the foreach over [8, 16, 32] in the
// HasVInstructions region above.
let Predicates = [HasVInstructionsI64] in {
// Vector Unit-Stride Instructions
def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
              VLESched<UpperBoundLMUL>;

def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
                VLFSched<UpperBoundLMUL>;

def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
              VSESched<UpperBoundLMUL>;
// Vector Strided Instructions
// Use EEW=64 for the sched class: this is a 64-bit strided load, matching
// VSSE64_V below and the VLSSched<eew, ...> pattern in the foreach above
// (the previous value of 32 was a copy/paste slip).
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
               VLSSched<64, UpperBoundLMUL>;

def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
               VSSSched<64, UpperBoundLMUL>;

defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R>;
defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R>;
defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R>;
defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R>;
} // Predicates = [HasVInstructionsI64]
// EEW=64 indexed accesses additionally require RV64 (see Predicates below).
let Predicates = [IsRV64, HasVInstructionsI64] in {
  // Vector Indexed Instructions
  defm "" : VIndexLoadStore<[64]>;
} // [IsRV64, HasVInstructionsI64]
1060
let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

// vneg.v is a pseudoinstruction: vrsub with the x0 scalar operand (0 - vs).
def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Widening convert pseudoinstructions: vwadd(u).vx with the x0 scalar.
def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvt.x.x.v $vd, $vs",
                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
// vmadc/vmsbc are defined twice on purpose: the VALUm_* defm gives the
// masked (vm operand) forms, the VALUNoVm_* defm the unmasked forms.
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

// vnot.v is a pseudoinstruction: vxor with the all-ones immediate.
def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
def : InstAlias<"vnot.v $vd, $vs",
                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

// vncvt.x.x.w is a pseudoinstruction: vnsrl by the x0 shift amount.
def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vncvt.x.x.w $vd, $vs",
                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

// Greater-than(-or-equal) .vv forms are aliases of the reversed-operand
// less-than(-or-equal) compares.
def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special case 0 immediate to maintain
// the always true/false semantics we would invert if we just decremented the
// immediate like we do for signed. To match the GNU assembler we will use
// vmseq/vmsne.vv with the same register for both operands which we can't do
// from an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}

// vmsge(u).vx has no hardware encoding; these asm-parser pseudos expand to
// multi-instruction sequences (the _M/_M_T variants cover masked forms, with
// _T taking a scratch register).
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}

// Vector Integer Min/Max Instructions
defm VMINU_V : VCMP_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VCMP_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VCMP_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VCMP_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
// Same funct6 as vmerge but with vm = 1 (unmasked) and vs2 forced to 0.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint  in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              Sched<[WriteVIMovV_UpperBound, ReadVIMovV_UpperBound]>;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovX_UpperBound, ReadVIMovX_UpperBound]>;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              Sched<[WriteVIMovI_UpperBound]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructions]
1303
1304let Predicates = [HasVInstructionsAnyF] in {
1305// Vector Single-Width Floating-Point Add/Subtract Instructions
1306let Uses = [FRM], mayRaiseFPException = true in {
1307defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
1308defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
1309defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;
1310}
1311
1312// Vector Widening Floating-Point Add/Subtract Instructions
1313let Constraints = "@earlyclobber $vd",
1314    Uses = [FRM],
1315    mayRaiseFPException = true in {
1316let RVVConstraint = WidenV in {
1317defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000>;
1318defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010>;
1319} // RVVConstraint = WidenV
1320// Set earlyclobber for following instructions for second and mask operands.
1321// This has the downside that the earlyclobber constraint is too coarse and
1322// will impose unnecessary restrictions by not allowing the destination to
1323// overlap with the first (wide) operand.
1324let RVVConstraint = WidenW in {
1325defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
1326defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
1327} // RVVConstraint = WidenW
1328} // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true
1329
1330// Vector Single-Width Floating-Point Multiply/Divide Instructions
1331let Uses = [FRM], mayRaiseFPException = true in {
1332defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
1333defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
1334defm VFRDIV_V : VRDIV_FV_F<"vfrdiv", 0b100001>;
1335}
1336
1337// Vector Widening Floating-Point Multiply
1338let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
1339    Uses = [FRM], mayRaiseFPException = true in {
1340defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
1341} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
1342
// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
// All eight FMA variants read the dynamic rounding mode and may raise FP
// exception flags. The vd-is-accumulator (macc/msac) and vd-is-multiplicand
// (madd/msub) variants, plus their negated forms, differ only in funct6.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;
}

// Vector Widening Floating-Point Fused Multiply-Add Instructions
// The wide accumulator/destination must not overlap the narrow sources,
// hence earlyclobber plus the WidenV overlap constraint.
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
1363
// Vector Floating-Point Square-Root Instruction
// vfsqrt.v and the vfrec7.v reciprocal estimate both read the dynamic
// rounding mode; all three unary ops here share funct6 0b010011 and are
// selected by the vs1 field value.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
}

// vfrsqrt7.v is deliberately outside the Uses = [FRM] group: unlike
// vfrec7.v it does not read the rounding mode, but it can still raise flags.
let mayRaiseFPException = true in
defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;

// Vector Floating-Point MIN/MAX Instructions
// min/max can raise FP exception flags but carry no Uses = [FRM]: they do
// not depend on the rounding mode.
let mayRaiseFPException = true in {
defm VFMIN_V : VCMP_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VCMP_FV_V_F<"vfmax", 0b000110>;
}
1378
// Vector Floating-Point Sign-Injection Instructions
// Sign-bit manipulation only: these are defined without Uses = [FRM] and
// without mayRaiseFPException.
defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;

// Pseudo-instruction aliases (masked and unmasked forms):
// vfneg.v vd, vs expands to vfsgnjn.vv vd, vs, vs, and
// vfabs.v vd, vs expands to vfsgnjx.vv vd, vs, vs.
def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfneg.v $vd, $vs",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
1392
// Vector Floating-Point Compare Instructions
// Compares write a mask result and carry no destination overlap restriction
// (NoConstraint). They may raise FP exception flags but do not read FRM.
let RVVConstraint = NoConstraint, mayRaiseFPException = true in {
defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
// vmfgt/vmfge only exist in scalar .vf form (the _F-only multiclass);
// the vector-vector spellings are provided as operand-swapped aliases below.
defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint, mayRaiseFPException = true

// vmfgt.vv/vmfge.vv are parse-only aliases (EmitPriority 0) that swap the
// source operands of vmflt.vv / vmfle.vv.
def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
// vfclass.v is defined without Uses = [FRM] or mayRaiseFPException; it
// shares funct6 0b010011 with the unary ops above, selected by vs1=0b10000.
defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;
1410
// Merge and move have no memory access and no side effects.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// Vector Floating-Point Merge Instruction
// vfmerge.vfm selects between vs2 elements and the scalar rs1 under the v0
// mask; the vm field is hard-wired to 0 (always masked).
let vm = 0 in
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
                  Sched<[WriteVFMergeV_UpperBound, ReadVFMergeV_UpperBound,
                         ReadVFMergeF_UpperBound, ReadVMask]>;

// Vector Floating-Point Move Instruction
// vfmv.v.f reuses the vfmerge encoding (funct6 0b010111) with vm = 1 and
// vs2 = 0: an unmasked splat of the scalar rs1.
let RVVConstraint = NoConstraint in
let vm = 1, vs2 = 0 in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
               Sched<[WriteVFMovV_UpperBound, ReadVFMovF_UpperBound]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
1429
// Single-Width Floating-Point/Integer Type-Convert Instructions
// All conversions may raise FP exception flags; only those that round under
// the dynamic rounding mode read FRM.
let mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
}
// The .rtz variants use a static round-towards-zero mode, so no FRM read.
defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
// Integer-to-float conversions also round under the dynamic mode.
let Uses = [FRM] in {
defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;
}
} // mayRaiseFPException = true
1443
// Widening Floating-Point/Integer Type-Convert Instructions
// Wide destination: earlyclobber plus the WidenCvt overlap constraint.
// Only the variants that round under the dynamic mode read FRM; the .rtz
// forms and the widening int-to-float / float-to-float forms do not.
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt,
    mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
}
defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt, mayRaiseFPException = true
1457
// Narrowing Floating-Point/Integer Type-Convert Instructions
// Narrow destination from a wide source (.w suffix); earlyclobber $vd.
let Constraints = "@earlyclobber $vd", mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
}
// The .rtz variants use static round-towards-zero — no FRM read.
defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
let Uses = [FRM] in {
defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
}
// vfncvt.rod.f.f.w uses a static rounding mode (round-to-odd, per the
// mnemonic), so it is outside the Uses = [FRM] group.
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd", mayRaiseFPException = true
} // Predicates = [HasVInstructionsAnyF]
1474
let Predicates = [HasVInstructions] in {

// Vector Single-Width Integer Reduction Instructions
// Reductions have no destination overlap restriction (NoConstraint).
let RVVConstraint = NoConstraint in {
defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VRED_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VRED_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VRED_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VRED_MV_V<"vredmin", 0b000101>;
defm VREDAND : VRED_MV_V<"vredand", 0b000001>;
defm VREDOR : VRED_MV_V<"vredor", 0b000010>;
defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

} // Predicates = [HasVInstructions]
1500
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
// Ordered (vfredosum) and unordered (vfredusum) sums round under the
// dynamic rounding mode.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
}
// min/max reductions may raise flags but do not read FRM.
let mayRaiseFPException = true in {
defm VFREDMAX : VRED_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VRED_FV_V<"vfredmin", 0b000101>;
}
} // RVVConstraint = NoConstraint

// Legacy spelling: vfredsum.vs is accepted as a parse-only alias (priority
// 0) of vfredusum.vs.
def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
}
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Legacy spelling: vfwredsum.vs parses as vfwredusum.vs (parse-only alias).
def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
} // Predicates = [HasVInstructionsAnyF]
1532
let Predicates = [HasVInstructions] in {
// Vector Mask-Register Logical Instructions
// All are unmasked .mm forms (the "m" suffix argument) with no destination
// overlap restriction.
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
}

// Mask idiom aliases: copy (vmand vs,vs), clear (vmxor vd,vd), set
// (vmxnor vd,vd) and complement (vmnand vs,vs).
def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

// Older spellings of vmandn.mm / vmorn.mm, accepted for parsing only
// (EmitPriority 0).
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
                (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
                (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint  in {

// Vector mask population count vcpop
// Writes its scalar result to a GPR.
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vcpop.m", "$vd, $vs2$vm">,
              Sched<[WriteVMPopV_UpperBound, ReadVMPopV_UpperBound,
                     ReadVMask]>;

// vfirst find-first-set mask bit
// Also produces a scalar GPR result.
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">,
              Sched<[WriteVMFFSV_UpperBound, ReadVMFFSV_UpperBound,
                     ReadVMask]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, RVVConstraint = NoConstraint

// Pre-ratification name: vpopc.m parses as vcpop.m (parse-only alias).
def : InstAlias<"vpopc.m $vd, $vs2$vm",
                (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {

// vmsbf.m set-before-first mask bit
defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;

} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// vid.v takes no vector source (vs2 is hard-wired to 0), only the mask.
let vs2 = 0 in
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
            Sched<[WriteVMIdxV_UpperBound, ReadVMask]>;

// Integer Scalar Move Instructions
// Always unmasked (vm = 1).
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX_UpperBound, ReadVIMovVX_UpperBound]>;
// vmv.s.x ties vd as both source and destination ($vd = $vd_wb), i.e. a
// read-modify-write of the destination register.
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV_UpperBound, ReadVIMovXV_UpperBound,
                     ReadVIMovXX_UpperBound]>;
}

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

} // Predicates = [HasVInstructions]
1618
let Predicates = [HasVInstructionsAnyF] in {

// Both scalar moves are always unmasked (vm = 1) and touch no memory.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint  in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
               Sched<[WriteVFMovVF_UpperBound, ReadVFMovVF_UpperBound]>;
// vfmv.s.f ties vd as source and destination ($vd = $vd_wb): a
// read-modify-write of the destination register.
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                       (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
               Sched<[WriteVFMovFV_UpperBound, ReadVFMovFV_UpperBound,
                      ReadVFMovFX_UpperBound]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1, RVVConstraint = NoConstraint

} // Predicates = [HasVInstructionsAnyF]
1636
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
// Slide-up destinations must not overlap the source (SlideUp constraint plus
// earlyclobber); slide-down has no such restriction.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
// FP variants of slide1up/slide1down take the inserted scalar from an FPR;
// the same up/down overlap asymmetry applies.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasVInstructionsAnyF]
1653
let Predicates = [HasVInstructions] in {
// Vector Register Gather Instruction
// Gather destinations must not overlap the sources (Vrgather constraint).
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100, uimm5>;
// vrgatherei16.vv (16-bit indices per the mnemonic) is defined directly
// rather than through the multiclass.
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
                      Sched<[WriteVGatherV_UpperBound, ReadVGatherV_UpperBound,
                             ReadVGatherV_UpperBound]>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

// Whole Vector Register Move Instructions (vmv1r.v/vmv2r.v/vmv4r.v/vmv8r.v):
// the register count n is encoded as n-1 in the instruction, and the operands
// use the correspondingly aligned register classes (VR / VRM2 / VRM4 / VRM8).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
// A future extension may relax the vector register alignment restrictions.
foreach n = [1, 2, 4, 8] in {
  defvar vrc = !cast<VReg>(!if(!eq(n, 1), "VR", "VRM"#n));
  def VMV#n#R_V  : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd),
                           (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">,
                   VMVRSched<n> {
    // Drop implicit uses inherited from the base class; always unmasked.
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasVInstructions]
1682
// Vector segment loads/stores for NF = 2..8 and EEW = 8/16/32. The NF field
// is encoded as nf-1 (!add(nf, -1)); EEW = 64 variants live in a separate
// HasVInstructionsI64 block below.
let Predicates = [HasVInstructions] in {
  foreach nf=2-8 in {
    foreach eew = [8, 16, 32] in {
      defvar w = !cast<RISCVWidth>("LSWidth"#eew);

      // Vector Unit-Stride Instructions (plus the fault-only-first "ff" load)
      def VLSEG#nf#E#eew#_V :
        VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
        VLSEGSched<nf, eew, UpperBoundLMUL>;
      def VLSEG#nf#E#eew#FF_V :
        VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
        VLSEGFFSched<nf, eew, UpperBoundLMUL>;
      def VSSEG#nf#E#eew#_V :
        VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
        VSSEGSched<nf, eew, UpperBoundLMUL>;
      // Vector Strided Instructions
      def VLSSEG#nf#E#eew#_V :
        VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
        VLSSEGSched<nf, eew, UpperBoundLMUL>;
      def VSSSEG#nf#E#eew#_V :
        VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
        VSSSEGSched<nf, eew, UpperBoundLMUL>;

      // Vector Indexed Instructions (unordered "ux" and ordered "ox" forms)
      def VLUXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                            "vluxseg"#nf#"ei"#eew#".v">,
        VLXSEGSched<nf, eew, "U", UpperBoundLMUL>;
      def VLOXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                            "vloxseg"#nf#"ei"#eew#".v">,
        VLXSEGSched<nf, eew, "O", UpperBoundLMUL>;
      def VSUXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                             "vsuxseg"#nf#"ei"#eew#".v">,
        VSXSEGSched<nf, eew, "U", UpperBoundLMUL>;
      def VSOXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                             "vsoxseg"#nf#"ei"#eew#".v">,
        VSXSEGSched<nf, eew, "O", UpperBoundLMUL>;
    }
  }
} // Predicates = [HasVInstructions]
1725
// EEW = 64 segment loads/stores. Unit-stride and strided forms require only
// HasVInstructionsI64; the indexed (ei64) forms additionally require RV64.
let Predicates = [HasVInstructionsI64] in {
  foreach nf=2-8 in {
    // Vector Unit-strided Segment Instructions
    def VLSEG#nf#E64_V :
      VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
      VLSEGSched<nf, 64, UpperBoundLMUL>;
    def VLSEG#nf#E64FF_V :
      VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
      VLSEGFFSched<nf, 64, UpperBoundLMUL>;
    def VSSEG#nf#E64_V :
      VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
      VSSEGSched<nf, 64, UpperBoundLMUL>;

    // Vector Strided Segment Instructions
    def VLSSEG#nf#E64_V :
      VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
      VLSSEGSched<nf, 64, UpperBoundLMUL>;
    def VSSSEG#nf#E64_V :
      VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
      VSSSEGSched<nf, 64, UpperBoundLMUL>;
  }
} // Predicates = [HasVInstructionsI64]
let Predicates = [HasVInstructionsI64, IsRV64] in {
  foreach nf = 2 - 8 in {
    // Vector Indexed Segment Instructions
    def VLUXSEG #nf #EI64_V
        : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
                              "vluxseg" #nf #"ei64.v">,
          VLXSEGSched<nf, 64, "U", UpperBoundLMUL>;
    def VLOXSEG #nf #EI64_V
        : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
                              "vloxseg" #nf #"ei64.v">,
          VLXSEGSched<nf, 64, "O", UpperBoundLMUL>;
    def VSUXSEG #nf #EI64_V
        : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
                               "vsuxseg" #nf #"ei64.v">,
          VSXSEGSched<nf, 64, "U", UpperBoundLMUL>;
    def VSOXSEG #nf #EI64_V
        : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
                               "vsoxseg" #nf #"ei64.v">,
          VSXSEGSched<nf, 64, "O", UpperBoundLMUL>;
  }
} // Predicates = [HasVInstructionsI64, IsRV64]
1769
1770include "RISCVInstrInfoVPseudos.td"
1771