//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 1.0.
///
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
  let Name = "VTypeI" # VTypeINum;
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
  let RenderMethod = "addVTypeIOperands";
}

class VTypeIOp<int VTypeINum> : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
  let OperandType = "OPERAND_VTYPEI" # VTypeINum;
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isUInt<VTypeINum>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def VTypeIOp10 : VTypeIOp<10>;
def VTypeIOp11 : VTypeIOp<11>;
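
// Note (illustrative): two widths are needed because vsetvli encodes an
// 11-bit vtype immediate (inst[30:20]) while vsetivli only has 10 bits
// available (inst[29:20]), its top two instruction bits being fixed to 0b11.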

def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}

def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let OperandType = "OPERAND_SIMM5";
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let OperandType = "OPERAND_SIMM5_PLUS1";
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}

def simm5_plus1_nonzero : ImmLeaf<XLenVT,
  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
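
// Note (illustrative): simm5_plus1 accepts [-15, 16], i.e. exactly the values
// imm for which imm-1 still fits in simm5. The vmsge/vmsgeu pseudos defined
// further below rely on this, e.g. the assembler can rewrite
//   vmsge.vi v4, v8, 16   =>   vmsgt.vi v4, v8, 15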

//===----------------------------------------------------------------------===//
// Scheduling definitions.
//===----------------------------------------------------------------------===//

class VMVRSched<int n> : Sched<[
  !cast<SchedReadWrite>("WriteVMov" #n #"V"),
  !cast<SchedReadWrite>("ReadVMov" #n #"V")
]>;

class VLESched<string lmul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLDE_" #lmul),
  ReadVLDX, ReadVMask
]>;

class VSESched<string lmul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSTE_" #lmul),
  !cast<SchedReadWrite>("ReadVSTEV_" #lmul),
  ReadVSTX, ReadVMask
]>;

class VLSSched<int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLDS" #eew #"_" #emul),
  ReadVLDX, ReadVLDSX, ReadVMask
]>;

class VSSSched<int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSTS" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVSTS" #eew #"V_" #emul),
  ReadVSTX, ReadVSTSX, ReadVMask
]>;

class VLXSched<int dataEEW, string isOrdered,
               string dataEMUL = "WorstCase",
               string idxEMUL = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLD" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
  ReadVLDX,
  !cast<SchedReadWrite>("ReadVLD" #isOrdered #"XV_" #idxEMUL), ReadVMask
]>;

class VSXSched<int dataEEW, string isOrdered,
               string dataEMUL = "WorstCase",
               string idxEMUL = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVST" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
  !cast<SchedReadWrite>("ReadVST" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
  ReadVSTX, !cast<SchedReadWrite>("ReadVST" #isOrdered #"XV_" #idxEMUL), ReadVMask
]>;
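
// Illustrative expansion of the !cast-based names above: with the default
// EMUL arguments, VLXSched<16, "O"> resolves to
//   Sched<[WriteVLDOX16_WorstCase, ReadVLDX, ReadVLDOXV_WorstCase, ReadVMask]>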

class VLFSched<string lmul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLDFF_" #lmul),
  ReadVLDX, ReadVMask
]>;

// Unit-Stride Segment Loads and Stores
class VLSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew #"_" #emul),
  ReadVLDX, ReadVMask
]>;
class VSSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVSTEV_" #emul),
  ReadVSTX, ReadVMask
]>;
class VLSEGFFSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew #"_" #emul),
  ReadVLDX, ReadVMask
]>;
// Strided Segment Loads and Stores
class VLSSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #emul),
  ReadVLDX, ReadVLDSX, ReadVMask
]>;
class VSSSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVSTS" #eew #"V_" #emul),
  ReadVSTX, ReadVSTSX, ReadVMask
]>;
// Indexed Segment Loads and Stores
class VLXSEGSched<int nf, int eew, string isOrdered, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVL" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul),
  ReadVLDX, !cast<SchedReadWrite>("ReadVLD" #isOrdered #"XV_" #emul), ReadVMask
]>;
class VSXSEGSched<int nf, int eew, string isOrdered, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVS" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVST" #isOrdered #"X" #eew #"_" #emul),
  ReadVSTX, !cast<SchedReadWrite>("ReadVST" #isOrdered #"XV_" #emul), ReadVMask
]>;

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPRMemZeroOffset:$rs1),
                opcodestr, "$vd, $rs1"> {
  let Uses = [];
}

// unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1), opcodestr, "$vd, $rs1">;
} // vm = 1, RVVConstraint = NoConstraint

// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $rs2$vm">;

// indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $vs2$vm">;

// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $rs2$vm">;

// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, ${rs1}$vm">;

let vm = 1 in {
// unit-stride whole register store vs<nf>r.v vs3, (rs1)
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPRMemZeroOffset:$rs1),
                opcodestr, "$vs3, $rs1"> {
  let Uses = [];
}

// unit-stride mask store vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1), opcodestr,
                "$vs3, $rs1">;
} // vm = 1

// strided store vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $rs2$vm">;

// indexed store vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $vs2$vm">;

// unit-stride segment store vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, ${rs1}$vm">;

// strided segment store vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $rs2$vm">;

// indexed segment store vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, rs1
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (use vs1 as instruction encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;

// op vd, vs2 (use vs1 as instruction encoding)
class VALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
              (ins VR:$vs2), opcodestr,
              "$vd, $vs2"> {
  let vm = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//

multiclass VIndexLoadStore<list<int> EEWList> {
  foreach n = EEWList in {
    defvar w = !cast<RISCVWidth>("LSWidth" # n);

    def VLUXEI # n # _V :
      VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
      VLXSched<n, "U">;
    def VLOXEI # n # _V :
      VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
      VLXSched<n, "O">;

    def VSUXEI # n # _V :
      VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
      VSXSched<n, "U">;
    def VSOXEI # n # _V :
      VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
      VSXSched<n, "O">;
  }
}
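
// Illustrative expansion: VIndexLoadStore<[8]> defines VLUXEI8_V
// ("vluxei8.v"), VLOXEI8_V ("vloxei8.v"), VSUXEI8_V ("vsuxei8.v") and
// VSOXEI8_V ("vsoxei8.v"), each with the matching indexed sched class.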

multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVIALUV_WorstCase, ReadVIALUV_WorstCase,
                  ReadVIALUV_WorstCase, ReadVMask]>;
}

multiclass VALU_IV_X<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVIALUX_WorstCase, ReadVIALUV_WorstCase,
                  ReadVIALUX_WorstCase, ReadVMask]>;
}

multiclass VALU_IV_I<string opcodestr, bits<6> funct6> {
  def I  : VALUVI<funct6, opcodestr # ".vi", simm5>,
           Sched<[WriteVIALUI_WorstCase, ReadVIALUV_WorstCase,
                  ReadVMask]>;
}

multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_V<opcodestr, funct6>,
      VALU_IV_X<opcodestr, funct6>,
      VALU_IV_I<opcodestr, funct6>;
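
// Illustrative expansion: defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000> (used
// further below) creates VADD_VV ("vadd.vv"), VADD_VX ("vadd.vx") and VADD_VI
// ("vadd.vi"), since the defm prefix is concatenated with the inner def names.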

multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6>
    : VALU_IV_V<opcodestr, funct6>,
      VALU_IV_X<opcodestr, funct6>;

multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_X<opcodestr, funct6>,
      VALU_IV_I<opcodestr, funct6>;

multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIWALUV_WorstCase, ReadVIWALUV_WorstCase,
                  ReadVIWALUV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIWALUX_WorstCase, ReadVIWALUV_WorstCase,
                  ReadVIWALUX_WorstCase, ReadVMask]>;
}

multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv">,
          Sched<[WriteVIMulAddV_WorstCase, ReadVIMulAddV_WorstCase,
                 ReadVIMulAddV_WorstCase, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">,
          Sched<[WriteVIMulAddX_WorstCase, ReadVIMulAddV_WorstCase,
                 ReadVIMulAddX_WorstCase, ReadVMask]>;
}

multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">,
          Sched<[WriteVIWMulAddX_WorstCase, ReadVIWMulAddV_WorstCase,
                 ReadVIWMulAddX_WorstCase, ReadVMask]>;
}

multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6>
   : VWMAC_MV_X<opcodestr, funct6> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv">,
          Sched<[WriteVIWMulAddV_WorstCase, ReadVIWMulAddV_WorstCase,
                 ReadVIWMulAddV_WorstCase, ReadVMask]>;
}

multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVExtV_WorstCase, ReadVExtV_WorstCase, ReadVMask]>;
}

multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVIMergeV_WorstCase, ReadVIMergeV_WorstCase,
                  ReadVIMergeV_WorstCase, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVIMergeX_WorstCase, ReadVIMergeV_WorstCase,
                  ReadVIMergeX_WorstCase, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVIMergeI_WorstCase, ReadVIMergeV_WorstCase,
                  ReadVMask]>;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV_WorstCase, ReadVICALUV_WorstCase,
                  ReadVICALUV_WorstCase, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX_WorstCase, ReadVICALUV_WorstCase,
                  ReadVICALUX_WorstCase, ReadVMask]>;
}

multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALUm_IV_V_X<opcodestr, funct6> {
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVICALUI_WorstCase, ReadVICALUV_WorstCase,
                  ReadVMask]>;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV_WorstCase, ReadVICALUV_WorstCase,
                 ReadVICALUV_WorstCase]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX_WorstCase, ReadVICALUV_WorstCase,
                 ReadVICALUX_WorstCase]>;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6>
   : VALUNoVm_IV_V_X<opcodestr, funct6> {
  def I : VALUVINoVm<funct6, opcodestr # ".vi", simm5>,
          Sched<[WriteVICALUI_WorstCase, ReadVICALUV_WorstCase]>;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFALUF_WorstCase, ReadVFALUV_WorstCase,
                 ReadVFALUF_WorstCase, ReadVMask]>;
}

multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6>
    : VALU_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFALUV_WorstCase, ReadVFALUV_WorstCase,
                 ReadVFALUV_WorstCase, ReadVMask]>;
}

multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWALUV_WorstCase, ReadVFWALUV_WorstCase,
                 ReadVFWALUV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWALUF_WorstCase, ReadVFWALUV_WorstCase,
                 ReadVFWALUF_WorstCase, ReadVMask]>;
}

multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFMulV_WorstCase, ReadVFMulV_WorstCase,
                 ReadVFMulV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFMulF_WorstCase, ReadVFMulV_WorstCase,
                 ReadVFMulF_WorstCase, ReadVMask]>;
}

multiclass VDIV_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFDivF_WorstCase, ReadVFDivV_WorstCase,
                 ReadVFDivF_WorstCase, ReadVMask]>;
}

multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6>
    : VDIV_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFDivV_WorstCase, ReadVFDivV_WorstCase,
                 ReadVFDivV_WorstCase, ReadVMask]>;
}

multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFWMulV_WorstCase, ReadVFWMulV_WorstCase,
                 ReadVFWMulV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFWMulF_WorstCase, ReadVFWMulV_WorstCase,
                 ReadVFWMulF_WorstCase, ReadVMask]>;
}

multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFMulAddV_WorstCase, ReadVFMulAddV_WorstCase,
                 ReadVFMulAddV_WorstCase, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFMulAddF_WorstCase, ReadVFMulAddV_WorstCase,
                 ReadVFMulAddF_WorstCase, ReadVMask]>;
}

multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFWMulAddV_WorstCase, ReadVFWMulAddV_WorstCase,
                 ReadVFWMulAddV_WorstCase, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFWMulAddF_WorstCase, ReadVFWMulAddV_WorstCase,
                 ReadVFWMulAddF_WorstCase, ReadVMask]>;
}

multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFSqrtV_WorstCase, ReadVFSqrtV_WorstCase,
                  ReadVMask]>;
}

multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFRecpV_WorstCase, ReadVFRecpV_WorstCase,
                  ReadVMask]>;
}

multiclass VMINMAX_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFMinMaxV_WorstCase, ReadVFMinMaxV_WorstCase,
                 ReadVFMinMaxV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFMinMaxF_WorstCase, ReadVFMinMaxV_WorstCase,
                 ReadVFMinMaxF_WorstCase, ReadVMask]>;
}

multiclass VCMP_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFCmpF_WorstCase, ReadVFCmpV_WorstCase,
                 ReadVFCmpF_WorstCase, ReadVMask]>;
}

multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6>
    : VCMP_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFCmpV_WorstCase, ReadVFCmpV_WorstCase,
                 ReadVFCmpV_WorstCase, ReadVMask]>;
}

multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFSgnjV_WorstCase, ReadVFSgnjV_WorstCase,
                 ReadVFSgnjV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFSgnjF_WorstCase, ReadVFSgnjV_WorstCase,
                 ReadVFSgnjF_WorstCase, ReadVMask]>;
}

multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFClassV_WorstCase, ReadVFClassV_WorstCase,
                  ReadVMask]>;
}

multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtIToFV_WorstCase, ReadVFCvtIToFV_WorstCase,
                  ReadVMask]>;
}

multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtFToIV_WorstCase, ReadVFCvtFToIV_WorstCase,
                  ReadVMask]>;
}

multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtIToFV_WorstCase, ReadVFWCvtIToFV_WorstCase,
                  ReadVMask]>;
}

multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToIV_WorstCase, ReadVFWCvtFToIV_WorstCase,
                  ReadVMask]>;
}

multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToFV_WorstCase, ReadVFWCvtFToFV_WorstCase,
                  ReadVMask]>;
}

multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtIToFV_WorstCase, ReadVFNCvtIToFV_WorstCase,
                  ReadVMask]>;
}

multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToIV_WorstCase, ReadVFNCvtFToIV_WorstCase,
                  ReadVMask]>;
}

multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToFV_WorstCase, ReadVFNCvtFToFV_WorstCase,
                  ReadVMask]>;
}

multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedV_From_WorstCase, ReadVIRedV, ReadVIRedV0,
                   ReadVMask]>;
}

multiclass VREDMINMAX_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedMinMaxV_From_WorstCase, ReadVIRedV, ReadVIRedV0,
                   ReadVMask]>;
}

multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            Sched<[WriteVIWRedV_From_WorstCase, ReadVIWRedV, ReadVIWRedV0,
                   ReadVMask]>;
}

multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedV_From_WorstCase, ReadVFRedV, ReadVFRedV0,
                   ReadVMask]>;
}

multiclass VREDMINMAX_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedMinMaxV_From_WorstCase, ReadVFRedV, ReadVFRedV0,
                   ReadVMask]>;
}

multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedOV_From_WorstCase, ReadVFRedOV, ReadVFRedOV0,
                   ReadVMask]>;
}

multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedV_From_WorstCase, ReadVFWRedV, ReadVFWRedV0,
                   ReadVMask]>;
}

multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedOV_From_WorstCase, ReadVFWRedOV, ReadVFWRedOV0,
                   ReadVMask]>;
}

multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr #"." #vm #"m">,
          Sched<[WriteVMALUV_WorstCase, ReadVMALUV_WorstCase,
                 ReadVMALUV_WorstCase]>;
}

multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMSFSV_WorstCase, ReadVMSFSV_WorstCase, ReadVMask]>;
}

multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMIotV_WorstCase, ReadVMIotV_WorstCase, ReadVMask]>;
}

multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVShiftV_WorstCase, ReadVShiftV_WorstCase,
                  ReadVShiftV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVShiftX_WorstCase, ReadVShiftV_WorstCase,
                  ReadVShiftX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVShiftI_WorstCase, ReadVShiftV_WorstCase,
                  ReadVMask]>;
}

multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".wv">,
           Sched<[WriteVNShiftV_WorstCase, ReadVNShiftV_WorstCase,
                  ReadVNShiftV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".wx">,
           Sched<[WriteVNShiftX_WorstCase, ReadVNShiftV_WorstCase,
                  ReadVNShiftX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".wi", uimm5>,
           Sched<[WriteVNShiftI_WorstCase, ReadVNShiftV_WorstCase,
                  ReadVMask]>;
}

multiclass VMINMAX_IV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVIMinMaxV_WorstCase, ReadVIMinMaxV_WorstCase,
                  ReadVIMinMaxV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVIMinMaxX_WorstCase, ReadVIMinMaxV_WorstCase,
                  ReadVIMinMaxX_WorstCase, ReadVMask]>;
}

multiclass VCMP_IV_V<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVICmpV_WorstCase, ReadVICmpV_WorstCase,
                  ReadVICmpV_WorstCase, ReadVMask]>;
}

multiclass VCMP_IV_X<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVICmpX_WorstCase, ReadVICmpV_WorstCase,
                  ReadVICmpX_WorstCase, ReadVMask]>;
}

multiclass VCMP_IV_I<string opcodestr, bits<6> funct6> {
  def I  : VALUVI<funct6, opcodestr # ".vi", simm5>,
           Sched<[WriteVICmpI_WorstCase, ReadVICmpV_WorstCase,
                  ReadVMask]>;
}

multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VCMP_IV_V<opcodestr, funct6>,
      VCMP_IV_X<opcodestr, funct6>,
      VCMP_IV_I<opcodestr, funct6>;

multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6>
    : VCMP_IV_X<opcodestr, funct6>,
      VCMP_IV_I<opcodestr, funct6>;

multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6>
    : VCMP_IV_V<opcodestr, funct6>,
      VCMP_IV_X<opcodestr, funct6>;

multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVIMulV_WorstCase, ReadVIMulV_WorstCase,
                  ReadVIMulV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVIMulX_WorstCase, ReadVIMulV_WorstCase,
                  ReadVIMulX_WorstCase, ReadVMask]>;
}

multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVIWMulV_WorstCase, ReadVIWMulV_WorstCase,
                  ReadVIWMulV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVIWMulX_WorstCase, ReadVIWMulV_WorstCase,
                  ReadVIWMulX_WorstCase, ReadVMask]>;
}

multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVIDivV_WorstCase, ReadVIDivV_WorstCase,
                  ReadVIDivV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVIDivX_WorstCase, ReadVIDivV_WorstCase,
                  ReadVIDivX_WorstCase, ReadVMask]>;
}

multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVSALUV_WorstCase, ReadVSALUV_WorstCase,
                  ReadVSALUV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVSALUX_WorstCase, ReadVSALUV_WorstCase,
                  ReadVSALUX_WorstCase, ReadVMask]>;
}

multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VSALU_IV_V_X<opcodestr, funct6> {
  def I  : VALUVI<funct6, opcodestr # ".vi", simm5>,
           Sched<[WriteVSALUI_WorstCase, ReadVSALUV_WorstCase,
                  ReadVMask]>;
}

multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVAALUV_WorstCase, ReadVAALUV_WorstCase,
                  ReadVAALUV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVAALUX_WorstCase, ReadVAALUV_WorstCase,
                  ReadVAALUX_WorstCase, ReadVMask]>;
}

multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVSMulV_WorstCase, ReadVSMulV_WorstCase,
                  ReadVSMulV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVSMulX_WorstCase, ReadVSMulV_WorstCase,
                  ReadVSMulX_WorstCase, ReadVMask]>;
}

multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVSShiftV_WorstCase, ReadVSShiftV_WorstCase,
                  ReadVSShiftV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVSShiftX_WorstCase, ReadVSShiftV_WorstCase,
                  ReadVSShiftX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVSShiftI_WorstCase, ReadVSShiftV_WorstCase,
                  ReadVMask]>;
}

multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".wv">,
           Sched<[WriteVNClipV_WorstCase, ReadVNClipV_WorstCase,
                  ReadVNClipV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".wx">,
           Sched<[WriteVNClipX_WorstCase, ReadVNClipV_WorstCase,
                  ReadVNClipX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".wi", uimm5>,
           Sched<[WriteVNClipI_WorstCase, ReadVNClipV_WorstCase,
                  ReadVMask]>;
}

multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVISlideX_WorstCase, ReadVISlideV_WorstCase,
                  ReadVISlideX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVISlideI_WorstCase, ReadVISlideV_WorstCase,
                  ReadVMask]>;
}

multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVISlide1X_WorstCase, ReadVISlideV_WorstCase,
                  ReadVISlideX_WorstCase, ReadVMask]>;
}

multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFSlide1F_WorstCase, ReadVFSlideV_WorstCase,
                 ReadVFSlideF_WorstCase, ReadVMask]>;
}

multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVRGatherVV_WorstCase, ReadVRGatherVV_data_WorstCase,
                  ReadVRGatherVV_index_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVRGatherVX_WorstCase, ReadVRGatherVX_data_WorstCase,
                  ReadVRGatherVX_index_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVRGatherVI_WorstCase, ReadVRGatherVI_data_WorstCase,
                  ReadVMask]>;
}

multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M  : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
           Sched<[WriteVCompressV_WorstCase, ReadVCompressV_WorstCase,
                  ReadVCompressV_WorstCase]>;
}

multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
  foreach l = [8, 16, 32] in {
    defvar w = !cast<RISCVWidth>("LSWidth" # l);
    defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R");

    def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                     Sched<[s, ReadVLDX]>;
  }
}
multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
              Sched<[schedrw, ReadVLDX]>;
}
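
// Illustrative expansion: defm VL1R : VWholeLoadN<0, "vl1r", VR> (see below)
// defines VL1RE8_V, VL1RE16_V and VL1RE32_V ("vl1re8.v" etc.); the EEW=64
// variants come from VWholeLoadEEW64 under the HasVInstructionsI64 predicate.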

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">,
                           Sched<[WriteVSETVLI, ReadVSETVLI]>;
def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">,
                             Sched<[WriteVSETIVLI]>;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">,
                          Sched<[WriteVSETVL, ReadVSETVL, ReadVSETVL]>;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
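
// Illustrative assembly for the configuration-setting instructions above:
//   vsetvli  t0, a0, e32, m2, ta, ma   // AVL in a0, vtype from the immediate
//   vsetivli t0, 8, e8, m1, tu, mu     // AVL is the 5-bit immediate 8
//   vsetvl   t0, a0, a1                // vtype taken from register a1
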
foreach eew = [8, 16, 32] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched;
  def VSE#eew#_V  : VUnitStrideStore<w,  "vse"#eew#".v">, VSESched;

  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w,  "vle"#eew#"ff.v">, VLFSched;

  // Vector Strided Instructions
  def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew>;
  def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew>;
}

defm "" : VIndexLoadStore<[8, 16, 32]>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructions] in {
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
             Sched<[WriteVLDM_WorstCase, ReadVLDX]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
             Sched<[WriteVSTM_WorstCase, ReadVSTM_WorstCase, ReadVSTX]>;
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;
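
// Note: "vle1.v"/"vse1.v" are the pre-1.0 spellings of vlm.v/vsm.v; the
// trailing 0 in the aliases above accepts them when parsing without using
// them when printing.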

defm VL1R : VWholeLoadN<0, "vl1r", VR>;
defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;

def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;

def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsI64] in {
// Vector Unit-Stride Instructions
def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
              VLESched;

def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
                VLFSched;

def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
              VSESched;
// Vector Strided Instructions
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
               VLSSched<64>;

def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
               VSSSched<64>;

defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R>;
defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R>;
defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R>;
defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R>;
} // Predicates = [HasVInstructionsI64]
let Predicates = [IsRV64, HasVInstructionsI64] in {
  // Vector Indexed Instructions
  defm "" : VIndexLoadStore<[64]>;
} // [IsRV64, HasVInstructionsI64]

let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;
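
// Illustrative expansion of the alias above:
//   vneg.v v4, v8   =>   vrsub.vx v4, v8, x0   // v4[i] = 0 - v8[i]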

// Vector Widening Integer Add/Subtract
// Refer to Section 11.2 "Widening Vector Arithmetic Instructions".
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000, "v">;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010, "v">;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001, "v">;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011, "v">;
} // RVVConstraint = WidenV
// Set earlyclobber on the following instructions for their second (narrow)
// source and mask operands. This has the downside that the earlyclobber
// constraint is too coarse and will impose unnecessary restrictions by not
// allowing the destination to overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvt.x.x.v $vd, $vs",
                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
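
// Illustrative expansion: vwcvt.x.x.v vd, vs is vwadd.vx vd, vs, x0 (widening
// sign-extension of each element to 2*SEW); vwcvtu.x.x.v uses vwaddu.vx for
// zero-extension.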

// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
def : InstAlias<"vnot.v $vd, $vs",
                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;
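
// Illustrative expansion of the alias above:
//   vnot.v v4, v8   =>   vxor.vi v4, v8, -1   // bitwise complement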

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to Section 11.3 "Narrowing Vector Arithmetic Instructions".
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100>;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101>;
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vncvt.x.x.w $vd, $vs",
                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
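
// Illustrative expansion: the aliases above synthesize the missing .vv forms
// by swapping the two vector sources, e.g.
//   vmsgt.vv v4, v8, v12   =>   vmslt.vv v4, v12, v8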

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we must special-case an immediate of 0: simply
// decrementing the immediate, as we do for the signed forms, would invert the
// always-true/always-false semantics. To match the GNU assembler we instead
// use vmseq.vv/vmsne.vv with the same register for both operands, which
// cannot be expressed with an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle the signed forms with pseudos as well, for consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}
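
// Illustrative expansion of the 0-immediate special case described above:
//   vmsgeu.vi v4, v8, 0   =>   vmseq.vv v4, v8, v8   // always true
//   vmsltu.vi v4, v8, 0   =>   vmsne.vv v4, v8, v8   // always false
// Nonzero immediates decrement instead, e.g. vmsgeu.vi v4, v8, 3 =>
// vmsgtu.vi v4, v8, 2.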

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}
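
// Illustrative expansion (unmasked case) performed by the assembler for the
// pseudos above, following the expansion suggested by the V specification:
//   vmsge.vx v4, v8, a0   =>   vmslt.vx  v4, v8, a0
//                              vmnand.mm v4, v4, v4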

// Vector Integer Min/Max Instructions
defm VMINU_V : VMINMAX_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VMINMAX_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VMINMAX_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VMINMAX_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
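// Per the spec, vmv.v.* shares the vmerge encoding with vm = 1 and vs2 = v0,
// which is why vs2 = 0 and vm = 1 are hardwired below.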
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              Sched<[WriteVIMovV_WorstCase, ReadVIMovV_WorstCase]>;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovX_WorstCase, ReadVIMovX_WorstCase]>;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              Sched<[WriteVIMovI_WorstCase]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110>;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;
}

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd",
    Uses = [FRM],
    mayRaiseFPException = true in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000, "v">;
defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010, "v">;
} // RVVConstraint = WidenV
// Set earlyclobber for the following instructions to protect the second
// (narrow) source operand and the mask operand. This has the downside that
// the earlyclobber constraint is too coarse and imposes unnecessary
// restrictions by also disallowing overlap between the destination and the
// first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true
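// For example, the ISA itself allows "vfwadd.wv v4, v4, v8" (vd overlapping
// the wide vs2 operand), but the coarse earlyclobber above keeps register
// allocation from forming that overlap.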

// Vector Single-Width Floating-Point Multiply/Divide Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VDIV_FV_F<"vfrdiv", 0b100001>;
}

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;
}

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true

// Vector Floating-Point Square-Root and Reciprocal Estimate Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
}

let mayRaiseFPException = true in
defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;

// Vector Floating-Point MIN/MAX Instructions
let mayRaiseFPException = true in {
defm VFMIN_V : VMINMAX_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VMINMAX_FV_V_F<"vfmax", 0b000110>;
}

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;

def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfneg.v $vd, $vs",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;

// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint, mayRaiseFPException = true in {
defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint, mayRaiseFPException = true

def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// Vector Floating-Point Merge Instruction
let vm = 0 in
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
                  Sched<[WriteVFMergeV_WorstCase, ReadVFMergeV_WorstCase,
                         ReadVFMergeF_WorstCase, ReadVMask]>;

// Vector Floating-Point Move Instruction
let RVVConstraint = NoConstraint in
let vm = 1, vs2 = 0 in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
               Sched<[WriteVFMovV_WorstCase, ReadVFMovF_WorstCase]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Single-Width Floating-Point/Integer Type-Convert Instructions
let mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
}
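// The .rtz variants encode a static round-towards-zero rounding mode (and
// .rod below a static round-to-odd mode), so they do not read FRM.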
defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
let Uses = [FRM] in {
defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;
}
} // mayRaiseFPException = true

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt,
    mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
}
defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
}
defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
let Uses = [FRM] in {
defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
}
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd", mayRaiseFPException = true
} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructions] in {

// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM  : VRED_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VREDMINMAX_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX  : VREDMINMAX_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VREDMINMAX_MV_V<"vredminu", 0b000100>;
defm VREDMIN  : VREDMINMAX_MV_V<"vredmin", 0b000101>;
defm VREDAND  : VRED_MV_V<"vredand", 0b000001>;
defm VREDOR   : VRED_MV_V<"vredor", 0b000010>;
defm VREDXOR  : VRED_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for the following instructions to protect the second
// source operand and the mask operand. This has the downside that the
// earlyclobber constraint is too coarse and imposes unnecessary restrictions
// by also disallowing overlap between the destination and the first (wide)
// operand.
defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
let Uses = [FRM], mayRaiseFPException = true in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
}
let mayRaiseFPException = true in {
defm VFREDMAX : VREDMINMAX_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VREDMINMAX_FV_V<"vfredmin", 0b000101>;
}
} // RVVConstraint = NoConstraint

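// vfredsum.vs is the pre-v1.0 spelling of vfredusum.vs; accept it as an
// alias for compatibility.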
def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for the following instructions to protect the second
// source operand and the mask operand. This has the downside that the
// earlyclobber constraint is too coarse and imposes unnecessary restrictions
// by also disallowing overlap between the destination and the first (wide)
// operand.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
}
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

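// Likewise, vfwredsum.vs is the pre-v1.0 spelling of vfwredusum.vs.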
def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructions] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
}

def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

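// vmandnot.mm and vmornot.mm are the pre-v1.0 spellings of vmandn.mm and
// vmorn.mm; accept them as aliases for compatibility.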
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
                (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
                (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {

// Vector mask population count vcpop
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vcpop.m", "$vd, $vs2$vm">,
              Sched<[WriteVMPopV_WorstCase, ReadVMPopV_WorstCase,
                     ReadVMask]>;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">,
              Sched<[WriteVMFFSV_WorstCase, ReadVMFFSV_WorstCase,
                     ReadVMask]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

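// vpopc.m is the pre-v1.0 spelling of vcpop.m; accept it as an alias for
// compatibility.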
def : InstAlias<"vpopc.m $vd, $vs2$vm",
                (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {

// vmsbf.m set-before-first mask bit
defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;

} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

let vs2 = 0 in
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
            Sched<[WriteVMIdxV_WorstCase, ReadVMask]>;

// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX, ReadVIMovVX]>;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
}

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
               Sched<[WriteVFMovVF, ReadVFMovVF]>;
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                       (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
               Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1

} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructions] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
                      Sched<[WriteVRGatherVV_WorstCase, ReadVRGatherVV_data_WorstCase,
                             ReadVRGatherVV_index_WorstCase]>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isMoveReg = 1,
    RVVConstraint = NoConstraint in {
// A future extension may relax the vector register alignment restrictions.
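// Today both the source and destination register numbers must be aligned to
// the group size: e.g. "vmv4r.v v4, v8" is valid, while "vmv4r.v v2, v8" is
// not.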
foreach n = [1, 2, 4, 8] in {
  defvar vrc = !cast<VReg>(!if(!eq(n, 1), "VR", "VRM"#n));
  def VMV#n#R_V : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd),
                          (ins vrc:$vs2), "vmv"#n#"r.v", "$vd, $vs2">,
                  VMVRSched<n> {
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, isMoveReg = 1
} // Predicates = [HasVInstructions]

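// Segment load/store names are generated from nf and eew: e.g. nf=2 with
// eew=8 produces vlseg2e8.v, vlseg2e8ff.v, vsseg2e8.v, vlsseg2e8.v,
// vssseg2e8.v, vluxseg2ei8.v, vloxseg2ei8.v, vsuxseg2ei8.v, and
// vsoxseg2ei8.v.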
let Predicates = [HasVInstructions] in {
  foreach nf = 2-8 in {
    foreach eew = [8, 16, 32] in {
      defvar w = !cast<RISCVWidth>("LSWidth"#eew);

      def VLSEG#nf#E#eew#_V :
        VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
        VLSEGSched<nf, eew>;
      def VLSEG#nf#E#eew#FF_V :
        VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
        VLSEGFFSched<nf, eew>;
      def VSSEG#nf#E#eew#_V :
        VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
        VSSEGSched<nf, eew>;
      // Vector Strided Segment Instructions
      def VLSSEG#nf#E#eew#_V :
        VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
        VLSSEGSched<nf, eew>;
      def VSSSEG#nf#E#eew#_V :
        VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
        VSSSEGSched<nf, eew>;

      // Vector Indexed Segment Instructions
      def VLUXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                            "vluxseg"#nf#"ei"#eew#".v">,
        VLXSEGSched<nf, eew, "U">;
      def VLOXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                            "vloxseg"#nf#"ei"#eew#".v">,
        VLXSEGSched<nf, eew, "O">;
      def VSUXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                             "vsuxseg"#nf#"ei"#eew#".v">,
        VSXSEGSched<nf, eew, "U">;
      def VSOXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                             "vsoxseg"#nf#"ei"#eew#".v">,
        VSXSEGSched<nf, eew, "O">;
    }
  }
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsI64] in {
  foreach nf = 2-8 in {
    // Vector Unit-Stride Segment Instructions
    def VLSEG#nf#E64_V :
      VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
      VLSEGSched<nf, 64>;
    def VLSEG#nf#E64FF_V :
      VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
      VLSEGFFSched<nf, 64>;
    def VSSEG#nf#E64_V :
      VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
      VSSEGSched<nf, 64>;

    // Vector Strided Segment Instructions
    def VLSSEG#nf#E64_V :
      VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
      VLSSEGSched<nf, 64>;
    def VSSSEG#nf#E64_V :
      VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
      VSSSEGSched<nf, 64>;
  }
} // Predicates = [HasVInstructionsI64]
let Predicates = [HasVInstructionsI64, IsRV64] in {
  foreach nf = 2-8 in {
    // Vector Indexed Segment Instructions
    def VLUXSEG#nf#EI64_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
                          "vluxseg"#nf#"ei64.v">,
      VLXSEGSched<nf, 64, "U">;
    def VLOXSEG#nf#EI64_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
                          "vloxseg"#nf#"ei64.v">,
      VLXSEGSched<nf, 64, "O">;
    def VSUXSEG#nf#EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
                           "vsuxseg"#nf#"ei64.v">,
      VSXSEGSched<nf, 64, "U">;
    def VSOXSEG#nf#EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
                           "vsoxseg"#nf#"ei64.v">,
      VSXSEGSched<nf, 64, "O">;
  }
} // Predicates = [HasVInstructionsI64, IsRV64]

include "RISCVInstrInfoVPseudos.td"