//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 0.10.
/// This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def VTypeIAsmOperand : AsmOperandClass {
  let Name = "VTypeI";
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
}

def VTypeIOp : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<11>";
}
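// Note: the vtype immediate operand is parsed and printed symbolically by
// parseVTypeI/printVTypeI (e.g. "e32,m1,ta,mu" in the v0.10 assembly syntax;
// the exact printed form is decided by those methods, not here); the
// underlying encoding is the 11-bit vtype layout from the V spec.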

def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}

def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
                                           [{return isInt<5>(Imm - 1);}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm - 1);
    return MCOp.isBareSymbolRef();
  }];
}
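// Note: isInt<5>(Imm - 1) accepts immediates in [-15, 16]. The vmsge/vmsgeu
// pseudo-instructions below use this operand because their immediate is
// lowered by decrementing it (with 0 special-cased for the unsigned forms).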

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask,
                LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1), opcodestr, "$vd, (${rs1})">;

// load vd, (rs1), vm
class VUnitStrideLoad<RISCVLSUMOP lumop, RISCVWidth width,
                      string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VR:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  let vm = 1;
  let Uses = [];
  let RVVConstraint = NoConstraint;
}

// segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVLSUMOP lumop,
                             RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// store vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask,
                LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
                "$vs3, (${rs1})">;

// store vs3, (rs1), vm
class VUnitStrideStore<RISCVLSUMOP sumop, RISCVWidth width,
                         string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// store vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// store vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// vs<nf>r.v vs3, (rs1)
class VWholeStore<bits<3> nf, string opcodestr>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VR:$vs3, GPR:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  let vm = 1;
  let Uses = [];
}

// segment store vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, rs1
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (the vs1 field is used as part of the opcode encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in {
// vamo vd, (rs1), vs2, vd, vm
class VAMOWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs VR:$vd_wd),
            (ins GPR:$rs1, VR:$vs2, VR:$vd, VMaskOp:$vm),
            opcodestr, "$vd_wd, (${rs1}), $vs2, $vd$vm"> {
    let Constraints = "$vd_wd = $vd";
    let wd = 1;
    bits<5> vd;
    let Inst{11-7} = vd;
}

// vamo x0, (rs1), vs2, vs3, vm
class VAMONoWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs),
            (ins GPR:$rs1, VR:$vs2, VR:$vs3, VMaskOp:$vm),
            opcodestr, "x0, (${rs1}), $vs2, $vs3$vm"> {
    bits<5> vs3;
    let Inst{11-7} = vs3;
}

} // hasSideEffects = 0, mayLoad = 1, mayStore = 1

//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6,
                         Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6,
                       Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
  def _VS  : VALUVV<funct6, OPIVV, opcodestr # ".vs">;
}

multiclass VALUr_IV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">;
}

multiclass VALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">;
}

multiclass VALU_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>;
}

multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
  def IM : VALUmVI<funct6, opcodestr # ".vim">;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6,
                             Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
}

multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALUr_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">;
}

multiclass VALU_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>;
}

multiclass VAMO<RISCVAMOOP amoop, RISCVWidth width, string opcodestr> {
  def _WD : VAMOWd<amoop, width, opcodestr>;
  def _UNWD : VAMONoWd<amoop, width, opcodestr>;
}

multiclass VWholeLoad<bits<3> nf, string opcodestr> {
  def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v">;
  def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v">;
  def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v">;
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v">;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0

// Vector Unit-Stride Instructions
def VLE8_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth8, "vle8.v">;
def VLE16_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth16, "vle16.v">;
def VLE32_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth32, "vle32.v">;
def VLE64_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth64, "vle64.v">;

def VLE8FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth8, "vle8ff.v">;
def VLE16FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth16, "vle16ff.v">;
def VLE32FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth32, "vle32ff.v">;
def VLE64FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth64, "vle64ff.v">;

def VLE1_V : VUnitStrideLoadMask<"vle1.v">;
def VSE1_V : VUnitStrideStoreMask<"vse1.v">;

def VSE8_V : VUnitStrideStore<SUMOPUnitStride, LSWidth8, "vse8.v">;
def VSE16_V : VUnitStrideStore<SUMOPUnitStride, LSWidth16, "vse16.v">;
def VSE32_V : VUnitStrideStore<SUMOPUnitStride, LSWidth32, "vse32.v">;
def VSE64_V : VUnitStrideStore<SUMOPUnitStride, LSWidth64, "vse64.v">;

// Vector Strided Instructions
def VLSE8_V : VStridedLoad<LSWidth8, "vlse8.v">;
def VLSE16_V : VStridedLoad<LSWidth16, "vlse16.v">;
def VLSE32_V : VStridedLoad<LSWidth32, "vlse32.v">;
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">;

def VSSE8_V : VStridedStore<LSWidth8, "vsse8.v">;
def VSSE16_V : VStridedStore<LSWidth16, "vsse16.v">;
def VSSE32_V : VStridedStore<LSWidth32, "vsse32.v">;
def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">;

// Vector Indexed Instructions
def VLUXEI8_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth8, "vluxei8.v">;
def VLUXEI16_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth16, "vluxei16.v">;
def VLUXEI32_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth32, "vluxei32.v">;
def VLUXEI64_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth64, "vluxei64.v">;

def VLOXEI8_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth8, "vloxei8.v">;
def VLOXEI16_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth16, "vloxei16.v">;
def VLOXEI32_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth32, "vloxei32.v">;
def VLOXEI64_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth64, "vloxei64.v">;

def VSUXEI8_V : VIndexedStore<MOPSTIndexedUnord, LSWidth8, "vsuxei8.v">;
def VSUXEI16_V : VIndexedStore<MOPSTIndexedUnord, LSWidth16, "vsuxei16.v">;
def VSUXEI32_V : VIndexedStore<MOPSTIndexedUnord, LSWidth32, "vsuxei32.v">;
def VSUXEI64_V : VIndexedStore<MOPSTIndexedUnord, LSWidth64, "vsuxei64.v">;

def VSOXEI8_V : VIndexedStore<MOPSTIndexedOrder, LSWidth8, "vsoxei8.v">;
def VSOXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsoxei16.v">;
def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;

defm VL1R : VWholeLoad<0, "vl1r">;
defm VL2R : VWholeLoad<1, "vl2r">;
defm VL4R : VWholeLoad<3, "vl4r">;
defm VL8R : VWholeLoad<7, "vl8r">;
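// The nf template argument is the encoded NFIELDS-1, so vl1r/vl2r/vl4r/vl8r
// pass 0, 1, 3 and 7 respectively.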
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VR:$vd, GPR:$rs1)>;

def VS1R_V : VWholeStore<0, "vs1r.v">;
def VS2R_V : VWholeStore<1, "vs2r.v">;
def VS4R_V : VWholeStore<3, "vs4r.v">;
def VS8R_V : VWholeStore<7, "vs8r.v">;

// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

def : InstAlias<"vneg.v $vd, $vs$vm",
                (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
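// For example, a masked vwadd.vv writing a 2*SEW-wide destination group may
// overlap neither v0 nor its SEW-wide source groups.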
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber on the following instructions to protect the second
// (narrow) source operand and the mask operand. This has the downside that
// the earlyclobber constraint is too coarse and imposes unnecessary
// restrictions by not allowing the destination to overlap with the first
// (wide) operand.
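// For example, vwaddu.wv could legally reuse its wide vs2 source as the
// destination, but the coarse earlyclobber below also rules that out.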
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VALU_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VALU_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VALU_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
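// For example, vnsrl.wv vd, vs2, vs1 requires that vd not overlap the wide
// vs2 group; the earlyclobber below enforces this conservatively.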
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VALU_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VALU_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VALU_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VALU_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VALU_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VALU_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VALU_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VALU_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VALU_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special-case a 0 immediate to maintain
// its always-true/always-false semantics; simply decrementing the immediate,
// as we do for the signed forms, would invert those semantics. To match the
// GNU assembler we emit vmseq/vmsne.vv with the same register for both
// operands, which cannot be expressed with an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle the signed forms with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}
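
// For illustration, assuming the immediate lowering described above:
//   vmsgeu.vi vd, va, 0  -->  vmseq.vv  vd, va, va    (always true)
//   vmsgeu.vi vd, va, N  -->  vmsgtu.vi vd, va, N-1   (for N in [1, 16])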

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}
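
// A possible expansion for the unmasked case, as suggested by the V spec:
//   vmsge{u}.vx vd, va, rs1  -->  vmslt{u}.vx vd, va, rs1
//                                 vmnand.mm   vd, vd, vd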

// Vector Integer Min/Max Instructions
defm VMINU_V : VALU_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VALU_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VALU_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VALU_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VALU_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VALU_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VALU_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VALU_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VALU_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VALU_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VALU_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VALU_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VALU_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VALU_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VALU_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VALUr_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VALUr_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VALUr_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VALUr_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VALUr_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VALUr_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VALUr_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VALUr_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VALUm_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VALU_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VALU_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VALU_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber on the following instructions to protect the second
// (narrow) source operand and the mask operand. This has the downside that
// the earlyclobber constraint is too coarse and imposes unnecessary
// restrictions by not allowing the destination to overlap with the first
// (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Vector Single-Width Floating-Point Multiply/Divide Instructions
defm VFMUL_V : VALU_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VALU_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VALU_FV_F<"vfrdiv", 0b100001>;

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMUL_V : VALU_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
defm VFMACC_V : VALUr_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VALUr_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VALUr_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VALUr_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VALUr_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VALUr_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VALUr_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VALUr_FV_V_F<"vfnmsub", 0b101011>;

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMACC_V : VALUr_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VALUr_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VALUr_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Floating-Point Square-Root Instruction
defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFRSQRT7_V : VALU_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
defm VFREC7_V : VALU_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;

// Vector Floating-Point MIN/MAX Instructions
defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VALU_FV_V_F<"vfmax", 0b000110>;

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VALU_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VALU_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VALU_FV_V_F<"vfsgnjx", 0b001010>;

def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;

// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint in {
defm VMFEQ_V : VALU_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VALU_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VALU_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VALU_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VALU_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VALU_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VALU_FV_VS2<"vfclass.v", 0b010011, 0b10000>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// Vector Floating-Point Merge Instruction
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// Vector Floating-Point Move Instruction
let RVVConstraint = NoConstraint in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1"> {
  let vs2 = 0;
  let vm = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Single-Width Floating-Point/Integer Type-Convert Instructions
defm VFCVT_XU_F_V : VALU_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VALU_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
defm VFCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VALU_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
defm VFCVT_F_XU_V : VALU_FV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VALU_FV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VALU_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VALU_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
defm VFWCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VALU_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VALU_FV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VALU_FV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VALU_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd" in {
defm VFNCVT_XU_F_W : VALU_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VALU_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
defm VFNCVT_RTZ_XU_F_W : VALU_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VALU_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
defm VFNCVT_F_XU_W : VALU_FV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VALU_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VALU_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VALU_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VALU_MV_V<"vredmin", 0b000101>;
defm VREDAND : VALU_MV_V<"vredand", 0b000001>;
defm VREDOR : VALU_MV_V<"vredor", 0b000010>;
defm VREDXOR : VALU_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber on the following instructions to protect the second
// (narrow) source operand and the mask operand. This has the downside that
// the earlyclobber constraint is too coarse and imposes unnecessary
// restrictions by not allowing the destination to overlap with the first
// (wide) operand.
defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>;
defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>;
defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>;
} // RVVConstraint = NoConstraint

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber on the following instructions to protect the second
// (narrow) source operand and the mask operand. This has the downside that
// the earlyclobber constraint is too coarse and imposes unnecessary
// restrictions by not allowing the destination to overlap with the first
// (wide) operand.
defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>;
defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDNOT_M : VALU_MV_Mask<"vmandnot", 0b011000, "m">;
defm VMXOR_M : VALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORNOT_M : VALU_MV_Mask<"vmornot", 0b011100, "m">;
defm VMXNOR_M : VALU_MV_Mask<"vmxnor", 0b011111, "m">;
}

def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
// Vector mask population count vpopc
def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                        (ins VR:$vs2, VMaskOp:$vm),
                        "vpopc.m", "$vd, $vs2$vm">;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                        (ins VR:$vs2, VMaskOp:$vm),
                        "vfirst.m", "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
// vmsbf.m set-before-first mask bit
defm VMSBF_M : VALU_MV_VS2<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VALU_MV_VS2<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VALU_MV_VS2<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VALU_MV_VS2<"viota.m", 0b010100, 0b10000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                      (ins VMaskOp:$vm), "vid.v", "$vd$vm"> {
  let vs2 = 0;
}

// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                      (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VALU_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VALU_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
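// The loop below defines VMV1R_V, VMV2R_V, VMV4R_V and VMV8R_V; !add(nf, -1)
// encodes NFIELDS-1 in the five-bit field otherwise used for vs1.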
foreach nf = [1, 2, 4, 8] in {
  def VMV#nf#R_V  : RVInstV<0b100111, !add(nf, -1), OPIVI, (outs VR:$vd),
                            (ins VR:$vs2), "vmv" # nf # "r.v",
                            "$vd, $vs2"> {
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtZvlsseg] in {
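  // As with the whole-register loads above, the nf field encodes NFIELDS-1,
  // hence the !add(nf, -1) in each definition below.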
  foreach nf = 2-8 in {
    def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth8, "vlseg"#nf#"e8.v">;
    def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
    def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
    def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth64, "vlseg"#nf#"e64.v">;

    def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth8, "vlseg"#nf#"e8ff.v">;
    def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth16, "vlseg"#nf#"e16ff.v">;
    def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth32, "vlseg"#nf#"e32ff.v">;
    def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth64, "vlseg"#nf#"e64ff.v">;

    def VSSEG#nf#E8_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth8, "vsseg"#nf#"e8.v">;
    def VSSEG#nf#E16_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth16, "vsseg"#nf#"e16.v">;
    def VSSEG#nf#E32_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth32, "vsseg"#nf#"e32.v">;
    def VSSEG#nf#E64_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;

    // Vector Strided Instructions
    def VLSSEG#nf#E8_V : VStridedSegmentLoad<!add(nf, -1), LSWidth8, "vlsseg"#nf#"e8.v">;
    def VLSSEG#nf#E16_V : VStridedSegmentLoad<!add(nf, -1), LSWidth16, "vlsseg"#nf#"e16.v">;
    def VLSSEG#nf#E32_V : VStridedSegmentLoad<!add(nf, -1), LSWidth32, "vlsseg"#nf#"e32.v">;
    def VLSSEG#nf#E64_V : VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;

    def VSSSEG#nf#E8_V : VStridedSegmentStore<!add(nf, -1), LSWidth8, "vssseg"#nf#"e8.v">;
    def VSSSEG#nf#E16_V : VStridedSegmentStore<!add(nf, -1), LSWidth16, "vssseg"#nf#"e16.v">;
    def VSSSEG#nf#E32_V : VStridedSegmentStore<!add(nf, -1), LSWidth32, "vssseg"#nf#"e32.v">;
    def VSSSEG#nf#E64_V : VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;

    // Vector Indexed Instructions
    def VLUXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                             LSWidth8, "vluxseg"#nf#"ei8.v">;
    def VLUXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth16, "vluxseg"#nf#"ei16.v">;
    def VLUXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth32, "vluxseg"#nf#"ei32.v">;
    def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth64, "vluxseg"#nf#"ei64.v">;

    def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                             LSWidth8, "vloxseg"#nf#"ei8.v">;
    def VLOXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth16, "vloxseg"#nf#"ei16.v">;
    def VLOXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth32, "vloxseg"#nf#"ei32.v">;
    def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth64, "vloxseg"#nf#"ei64.v">;

    def VSUXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                             LSWidth8, "vsuxseg"#nf#"ei8.v">;
    def VSUXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth16, "vsuxseg"#nf#"ei16.v">;
    def VSUXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth32, "vsuxseg"#nf#"ei32.v">;
    def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth64, "vsuxseg"#nf#"ei64.v">;

    def VSOXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                             LSWidth8, "vsoxseg"#nf#"ei8.v">;
    def VSOXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth16, "vsoxseg"#nf#"ei16.v">;
    def VSOXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth32, "vsoxseg"#nf#"ei32.v">;
    def VSOXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth64, "vsoxseg"#nf#"ei64.v">;
  }
} // Predicates = [HasStdExtZvlsseg]

let Predicates = [HasStdExtZvamo, HasStdExtA] in {
  defm VAMOSWAPEI8 : VAMO<AMOOPVamoSwap, LSWidth8, "vamoswapei8.v">;
  defm VAMOSWAPEI16 : VAMO<AMOOPVamoSwap, LSWidth16, "vamoswapei16.v">;
  defm VAMOSWAPEI32 : VAMO<AMOOPVamoSwap, LSWidth32, "vamoswapei32.v">;

  defm VAMOADDEI8 : VAMO<AMOOPVamoAdd, LSWidth8, "vamoaddei8.v">;
  defm VAMOADDEI16 : VAMO<AMOOPVamoAdd, LSWidth16, "vamoaddei16.v">;
  defm VAMOADDEI32 : VAMO<AMOOPVamoAdd, LSWidth32, "vamoaddei32.v">;

  defm VAMOXOREI8 : VAMO<AMOOPVamoXor, LSWidth8, "vamoxorei8.v">;
  defm VAMOXOREI16 : VAMO<AMOOPVamoXor, LSWidth16, "vamoxorei16.v">;
  defm VAMOXOREI32 : VAMO<AMOOPVamoXor, LSWidth32, "vamoxorei32.v">;

  defm VAMOANDEI8 : VAMO<AMOOPVamoAnd, LSWidth8, "vamoandei8.v">;
  defm VAMOANDEI16 : VAMO<AMOOPVamoAnd, LSWidth16, "vamoandei16.v">;
  defm VAMOANDEI32 : VAMO<AMOOPVamoAnd, LSWidth32, "vamoandei32.v">;

  defm VAMOOREI8 : VAMO<AMOOPVamoOr, LSWidth8, "vamoorei8.v">;
  defm VAMOOREI16 : VAMO<AMOOPVamoOr, LSWidth16, "vamoorei16.v">;
  defm VAMOOREI32 : VAMO<AMOOPVamoOr, LSWidth32, "vamoorei32.v">;

  defm VAMOMINEI8 : VAMO<AMOOPVamoMin, LSWidth8, "vamominei8.v">;
  defm VAMOMINEI16 : VAMO<AMOOPVamoMin, LSWidth16, "vamominei16.v">;
  defm VAMOMINEI32 : VAMO<AMOOPVamoMin, LSWidth32, "vamominei32.v">;

  defm VAMOMAXEI8 : VAMO<AMOOPVamoMax, LSWidth8, "vamomaxei8.v">;
  defm VAMOMAXEI16 : VAMO<AMOOPVamoMax, LSWidth16, "vamomaxei16.v">;
  defm VAMOMAXEI32 : VAMO<AMOOPVamoMax, LSWidth32, "vamomaxei32.v">;

  defm VAMOMINUEI8 : VAMO<AMOOPVamoMinu, LSWidth8, "vamominuei8.v">;
  defm VAMOMINUEI16 : VAMO<AMOOPVamoMinu, LSWidth16, "vamominuei16.v">;
  defm VAMOMINUEI32 : VAMO<AMOOPVamoMinu, LSWidth32, "vamominuei32.v">;

  defm VAMOMAXUEI8 : VAMO<AMOOPVamoMaxu, LSWidth8, "vamomaxuei8.v">;
  defm VAMOMAXUEI16 : VAMO<AMOOPVamoMaxu, LSWidth16, "vamomaxuei16.v">;
  defm VAMOMAXUEI32 : VAMO<AMOOPVamoMaxu, LSWidth32, "vamomaxuei32.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA]

let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in {
  defm VAMOSWAPEI64 : VAMO<AMOOPVamoSwap, LSWidth64, "vamoswapei64.v">;
  defm VAMOADDEI64 : VAMO<AMOOPVamoAdd, LSWidth64, "vamoaddei64.v">;
  defm VAMOXOREI64 : VAMO<AMOOPVamoXor, LSWidth64, "vamoxorei64.v">;
  defm VAMOANDEI64 : VAMO<AMOOPVamoAnd, LSWidth64, "vamoandei64.v">;
  defm VAMOOREI64 : VAMO<AMOOPVamoOr, LSWidth64, "vamoorei64.v">;
  defm VAMOMINEI64 : VAMO<AMOOPVamoMin, LSWidth64, "vamominei64.v">;
  defm VAMOMAXEI64 : VAMO<AMOOPVamoMax, LSWidth64, "vamomaxei64.v">;
  defm VAMOMINUEI64 : VAMO<AMOOPVamoMinu, LSWidth64, "vamominuei64.v">;
  defm VAMOMAXUEI64 : VAMO<AMOOPVamoMaxu, LSWidth64, "vamomaxuei64.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64]

include "RISCVInstrInfoVPseudos.td"