1//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file describes the RISC-V instructions from the standard 'V' Vector 10/// extension, version 0.10. 11/// This version is still experimental as the 'V' extension hasn't been 12/// ratified yet. 13/// 14//===----------------------------------------------------------------------===// 15 16include "RISCVInstrFormatsV.td" 17 18//===----------------------------------------------------------------------===// 19// Operand and SDNode transformation definitions. 20//===----------------------------------------------------------------------===// 21 22class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass { 23 let Name = "VTypeI" # VTypeINum; 24 let ParserMethod = "parseVTypeI"; 25 let DiagnosticType = "InvalidVTypeI"; 26 let RenderMethod = "addVTypeIOperands"; 27} 28 29class VTypeIOp<int VTypeINum> : Operand<XLenVT> { 30 let ParserMatchClass = VTypeIAsmOperand<VTypeINum>; 31 let PrintMethod = "printVTypeI"; 32 let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">"; 33} 34 35def VTypeIOp10 : VTypeIOp<10>; 36def VTypeIOp11 : VTypeIOp<11>; 37 38def VMaskAsmOperand : AsmOperandClass { 39 let Name = "RVVMaskRegOpOperand"; 40 let RenderMethod = "addRegOperands"; 41 let PredicateMethod = "isV0Reg"; 42 let ParserMethod = "parseMaskReg"; 43 let IsOptional = 1; 44 let DefaultMethod = "defaultMaskRegOp"; 45 let DiagnosticType = "InvalidVMaskRegister"; 46} 47 48def VMaskOp : RegisterOperand<VMV0> { 49 let ParserMatchClass = VMaskAsmOperand; 50 let PrintMethod = "printVMaskReg"; 51 let EncoderMethod = "getVMaskReg"; 52 let DecoderMethod = "decodeVMaskReg"; 53} 54 55def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> { 56 let ParserMatchClass = SImmAsmOperand<5>; 57 let EncoderMethod = "getImmOpValue"; 58 let DecoderMethod = "decodeSImmOperand<5>"; 59 let MCOperandPredicate = [{ 60 int64_t Imm; 61 if (MCOp.evaluateAsConstantImm(Imm)) 62 return isInt<5>(Imm); 63 return MCOp.isBareSymbolRef(); 64 }]; 65} 66 67def SImm5Plus1AsmOperand : AsmOperandClass { 68 let Name = "SImm5Plus1"; 69 let RenderMethod = "addImmOperands"; 70 let DiagnosticType = "InvalidSImm5Plus1"; 71} 72 73def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT, 74 [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> { 75 let ParserMatchClass = SImm5Plus1AsmOperand; 76 let MCOperandPredicate = [{ 77 int64_t Imm; 78 if (MCOp.evaluateAsConstantImm(Imm)) 79 return (isInt<5>(Imm) && Imm != -16) || Imm == 16; 80 return MCOp.isBareSymbolRef(); 81 }]; 82} 83 84def simm5_plus1_nonzero : ImmLeaf<XLenVT, 85 [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>; 86 87//===----------------------------------------------------------------------===// 88// Scheduling definitions. 89//===----------------------------------------------------------------------===// 90 91class VMVRSched<int n>: Sched <[!cast<SchedReadWrite>("WriteVMov" # n # "V"), 92 !cast<SchedReadWrite>("ReadVMov" # n # "V")]>; 93 94class VLESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDE" # n), 95 ReadVLDX, ReadVMask]>; 96 97class VSESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTE" # n), 98 !cast<SchedReadWrite>("ReadVSTE" # n # "V"), 99 ReadVSTX, ReadVMask]>; 100 101class VLSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDS" # n), 102 ReadVLDX, ReadVLDSX, ReadVMask]>; 103 104class VSSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTS" # n), 105 !cast<SchedReadWrite>("ReadVSTS" # n # "V"), 106 ReadVSTX, ReadVSTSX, ReadVMask]>; 107 108class VLXSched<int n, string o> : 109 Sched <[!cast<SchedReadWrite>("WriteVLD" # o # "X" # n), 110 ReadVLDX, !cast<SchedReadWrite>("ReadVLD" # o # "XV"), ReadVMask]>; 111 112class VSXSched<int n, string o> : 113 Sched <[!cast<SchedReadWrite>("WriteVST" # o # "X" # n), 114 !cast<SchedReadWrite>("ReadVST" # o # "X" # n), 115 ReadVSTX, !cast<SchedReadWrite>("ReadVST" # o # "XV"), ReadVMask]>; 116 117class VLFSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDFF" # n), 118 ReadVLDX, ReadVMask]>; 119 120//===----------------------------------------------------------------------===// 121// Instruction class templates 122//===----------------------------------------------------------------------===// 123 124let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in { 125// unit-stride load vd, (rs1), vm 126class VUnitStrideLoad<RISCVWidth width, string opcodestr> 127 : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0}, 128 (outs VR:$vd), 129 (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">; 130 131let vm = 1, RVVConstraint = NoConstraint in { 132// unit-stride whole register load vl<nf>r.v vd, (rs1) 133class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC> 134 : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg, 135 width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1), 136 opcodestr, "$vd, (${rs1})"> { 137 let Uses = []; 138} 139 140// unit-stride mask load vd, (rs1) 141class VUnitStrideLoadMask<string opcodestr> 142 : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0}, 143 (outs VR:$vd), 144 (ins GPR:$rs1), opcodestr, "$vd, (${rs1})">; 145} // vm = 1, RVVConstraint = NoConstraint 146 147// unit-stride fault-only-first load vd, (rs1), vm 148class VUnitStrideLoadFF<RISCVWidth width, string opcodestr> 149 : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0}, 150 (outs VR:$vd), 151 (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">; 152 153// strided load vd, (rs1), rs2, vm 154class VStridedLoad<RISCVWidth width, string opcodestr> 155 : RVInstVLS<0b000, width.Value{3}, width.Value{2-0}, 156 (outs VR:$vd), 157 (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, 158 "$vd, (${rs1}), $rs2$vm">; 159 160// indexed load vd, (rs1), vs2, vm 161class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr> 162 : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0}, 163 (outs VR:$vd), 164 (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr, 165 "$vd, (${rs1}), $vs2$vm">; 166 167// unit-stride segment load vd, (rs1), vm 168class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr> 169 : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0}, 170 (outs VR:$vd), 171 (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">; 172 173// segment fault-only-first load vd, (rs1), vm 174class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr> 175 : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0}, 176 (outs VR:$vd), 177 (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">; 178 179// strided segment load vd, (rs1), rs2, vm 180class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr> 181 : RVInstVLS<nf, width.Value{3}, width.Value{2-0}, 182 (outs VR:$vd), 183 (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, 184 "$vd, (${rs1}), $rs2$vm">; 185 186// indexed segment load vd, (rs1), vs2, vm 187class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width, 188 string opcodestr> 189 : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0}, 190 (outs VR:$vd), 191 (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr, 192 "$vd, (${rs1}), $vs2$vm">; 193} // hasSideEffects = 0, mayLoad = 1, mayStore = 0 194 195let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in { 196// unit-stride store vd, vs3, (rs1), vm 197class VUnitStrideStore<RISCVWidth width, string opcodestr> 198 : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0}, 199 (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr, 200 "$vs3, (${rs1})$vm">; 201 202let vm = 1 in { 203// vs<nf>r.v vd, (rs1) 204class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC> 205 : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg, 206 0b000, (outs), (ins VRC:$vs3, GPR:$rs1), 207 opcodestr, "$vs3, (${rs1})"> { 208 let Uses = []; 209} 210 211// unit-stride mask store vd, vs3, (rs1) 212class VUnitStrideStoreMask<string opcodestr> 213 : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0}, 214 (outs), (ins VR:$vs3, GPR:$rs1), opcodestr, 215 "$vs3, (${rs1})">; 216} // vm = 1 217 218// strided store vd, vs3, (rs1), rs2, vm 219class VStridedStore<RISCVWidth width, string opcodestr> 220 : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs), 221 (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm), 222 opcodestr, "$vs3, (${rs1}), $rs2$vm">; 223 224// indexed store vd, vs3, (rs1), vs2, vm 225class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr> 226 : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs), 227 (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm), 228 opcodestr, "$vs3, (${rs1}), $vs2$vm">; 229 230// segment store vd, vs3, (rs1), vm 231class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr> 232 : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0}, 233 (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr, 234 "$vs3, (${rs1})$vm">; 235 236// segment store vd, vs3, (rs1), rs2, vm 237class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr> 238 : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs), 239 (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm), 240 opcodestr, "$vs3, (${rs1}), $rs2$vm">; 241 242// segment store vd, vs3, (rs1), vs2, vm 243class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width, 244 string opcodestr> 245 : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs), 246 (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm), 247 opcodestr, "$vs3, (${rs1}), $vs2$vm">; 248} // hasSideEffects = 0, mayLoad = 0, mayStore = 1 249 250let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 251// op vd, vs2, vs1, vm 252class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr> 253 : RVInstVV<funct6, opv, (outs VR:$vd), 254 (ins VR:$vs2, VR:$vs1, VMaskOp:$vm), 255 opcodestr, "$vd, $vs2, $vs1$vm">; 256 257// op vd, vs2, vs1, v0 (without mask, use v0 as carry input) 258class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr> 259 : RVInstVV<funct6, opv, (outs VR:$vd), 260 (ins VR:$vs2, VR:$vs1, VMV0:$v0), 261 opcodestr, "$vd, $vs2, $vs1, v0"> { 262 let vm = 0; 263} 264 265// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2) 266class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr> 267 : RVInstVV<funct6, opv, (outs VR:$vd), 268 (ins VR:$vs1, VR:$vs2, VMaskOp:$vm), 269 opcodestr, "$vd, $vs1, $vs2$vm">; 270 271// op vd, vs2, vs1 272class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr> 273 : RVInstVV<funct6, opv, (outs VR:$vd), 274 (ins VR:$vs2, VR:$vs1), 275 opcodestr, "$vd, $vs2, $vs1"> { 276 let vm = 1; 277} 278 279// op vd, vs2, rs1, vm 280class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr> 281 : RVInstVX<funct6, opv, (outs VR:$vd), 282 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 283 opcodestr, "$vd, $vs2, $rs1$vm">; 284 285// op vd, vs2, rs1, v0 (without mask, use v0 as carry input) 286class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr> 287 : RVInstVX<funct6, opv, (outs VR:$vd), 288 (ins VR:$vs2, GPR:$rs1, VMV0:$v0), 289 opcodestr, "$vd, $vs2, $rs1, v0"> { 290 let vm = 0; 291} 292 293// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2) 294class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr> 295 : RVInstVX<funct6, opv, (outs VR:$vd), 296 (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), 297 opcodestr, "$vd, $rs1, $vs2$vm">; 298 299// op vd, vs1, vs2 300class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr> 301 : RVInstVX<funct6, opv, (outs VR:$vd), 302 (ins VR:$vs2, GPR:$rs1), 303 opcodestr, "$vd, $vs2, $rs1"> { 304 let vm = 1; 305} 306 307// op vd, vs2, imm, vm 308class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5> 309 : RVInstIVI<funct6, (outs VR:$vd), 310 (ins VR:$vs2, optype:$imm, VMaskOp:$vm), 311 opcodestr, "$vd, $vs2, $imm$vm">; 312 313// op vd, vs2, imm, v0 (without mask, use v0 as carry input) 314class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5> 315 : RVInstIVI<funct6, (outs VR:$vd), 316 (ins VR:$vs2, optype:$imm, VMV0:$v0), 317 opcodestr, "$vd, $vs2, $imm, v0"> { 318 let vm = 0; 319} 320 321// op vd, vs2, imm, vm 322class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5> 323 : RVInstIVI<funct6, (outs VR:$vd), 324 (ins VR:$vs2, optype:$imm), 325 opcodestr, "$vd, $vs2, $imm"> { 326 let vm = 1; 327} 328 329// op vd, vs2, rs1, vm (Float) 330class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr> 331 : RVInstVX<funct6, opv, (outs VR:$vd), 332 (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm), 333 opcodestr, "$vd, $vs2, $rs1$vm">; 334 335// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2) 336class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr> 337 : RVInstVX<funct6, opv, (outs VR:$vd), 338 (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm), 339 opcodestr, "$vd, $rs1, $vs2$vm">; 340 341// op vd, vs2, vm (use vs1 as instruction encoding) 342class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr> 343 : RVInstV<funct6, vs1, opv, (outs VR:$vd), 344 (ins VR:$vs2, VMaskOp:$vm), 345 opcodestr, "$vd, $vs2$vm">; 346} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 347 348//===----------------------------------------------------------------------===// 349// Combination of instruction classes. 350// Use these multiclasses to define instructions more easily. 351//===----------------------------------------------------------------------===// 352 353multiclass VIndexLoadStore<list<int> EEWList> { 354 foreach n = EEWList in { 355 defvar w = !cast<RISCVWidth>("LSWidth" # n); 356 357 def VLUXEI # n # _V : 358 VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">, 359 VLXSched<n, "U">; 360 def VLOXEI # n # _V : 361 VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">, 362 VLXSched<n, "O">; 363 364 def VSUXEI # n # _V : 365 VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">, 366 VSXSched<n, "U">; 367 def VSOXEI # n # _V : 368 VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">, 369 VSXSched<n, "O">; 370 } 371} 372 373multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 374 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 375 Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>; 376 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 377 Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>; 378 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 379 Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>; 380} 381 382multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 383 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 384 Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>; 385 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 386 Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>; 387} 388 389multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 390 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 391 Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUX, ReadVMask]>; 392 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 393 Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>; 394} 395 396multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 397 def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 398 Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>; 399 def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 400 Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>; 401} 402 403multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 404 def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 405 Sched<[WriteVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVMask]>; 406 def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 407 Sched<[WriteVIMulAddX, ReadVIMulAddV, ReadVIMulAddX, ReadVMask]>; 408} 409 410multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 411 def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 412 Sched<[WriteVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVMask]>; 413 def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 414 Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>; 415} 416 417multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> { 418 def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 419 Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>; 420} 421 422multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 423 def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>, 424 Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; 425} 426 427multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> { 428 def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">, 429 Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>; 430 def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">, 431 Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>; 432 def IM : VALUmVI<funct6, opcodestr # ".vim">, 433 Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>; 434} 435 436multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> { 437 def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">, 438 Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>; 439 def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">, 440 Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>; 441 def IM : VALUmVI<funct6, opcodestr # ".vim">, 442 Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>; 443} 444 445multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> { 446 def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">, 447 Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>; 448 def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">, 449 Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>; 450} 451 452multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> { 453 def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">, 454 Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>; 455 def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">, 456 Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>; 457 def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>, 458 Sched<[WriteVICALUI, ReadVIALUCV]>; 459} 460 461multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> { 462 def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">, 463 Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>; 464 def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">, 465 Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>; 466} 467 468multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 469 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 470 Sched<[WriteVFALUV, ReadVFALUV, ReadVFALUV, ReadVMask]>; 471 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 472 Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>; 473} 474 475multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> { 476 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 477 Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>; 478} 479 480multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 481 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 482 Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>; 483 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 484 Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>; 485} 486 487multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 488 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 489 Sched<[WriteVFMulV, ReadVFMulV, ReadVFMulV, ReadVMask]>; 490 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 491 Sched<[WriteVFMulF, ReadVFMulV, ReadVFMulF, ReadVMask]>; 492} 493 494multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 495 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 496 Sched<[WriteVFDivV, ReadVFDivV, ReadVFDivV, ReadVMask]>; 497 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 498 Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>; 499} 500 501multiclass VRDIV_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> { 502 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 503 Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>; 504} 505 506multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 507 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 508 Sched<[WriteVFWMulV, ReadVFWMulV, ReadVFWMulV, ReadVMask]>; 509 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 510 Sched<[WriteVFWMulF, ReadVFWMulV, ReadVFWMulF, ReadVMask]>; 511} 512 513multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 514 def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 515 Sched<[WriteVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVMask]>; 516 def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 517 Sched<[WriteVFMulAddF, ReadVFMulAddV, ReadVFMulAddF, ReadVMask]>; 518} 519 520multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 521 def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 522 Sched<[WriteVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVMask]>; 523 def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 524 Sched<[WriteVFWMulAddF, ReadVFWMulAddV, ReadVFWMulAddF, ReadVMask]>; 525} 526 527multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 528 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 529 Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>; 530} 531 532multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 533 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 534 Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>; 535} 536 537multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 538 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 539 Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>; 540 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 541 Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>; 542} 543 544multiclass VCMP_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> { 545 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 546 Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>; 547} 548 549multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> { 550 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 551 Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>; 552 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 553 Sched<[WriteVFSgnjF, ReadVFSgnjV, ReadVFSgnjF, ReadVMask]>; 554} 555 556multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 557 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 558 Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>; 559} 560 561multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 562 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 563 Sched<[WriteVFCvtIToFV, ReadVFCvtIToFV, ReadVMask]>; 564} 565 566multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 567 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 568 Sched<[WriteVFCvtFToIV, ReadVFCvtFToIV, ReadVMask]>; 569} 570 571multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 572 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 573 Sched<[WriteVFWCvtIToFV, ReadVFWCvtIToFV, ReadVMask]>; 574} 575 576multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 577 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 578 Sched<[WriteVFWCvtFToIV, ReadVFWCvtFToIV, ReadVMask]>; 579} 580 581multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 582 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 583 Sched<[WriteVFWCvtFToFV, ReadVFWCvtFToFV, ReadVMask]>; 584} 585 586multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 587 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 588 Sched<[WriteVFNCvtIToFV, ReadVFNCvtIToFV, ReadVMask]>; 589} 590 591multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 592 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 593 Sched<[WriteVFNCvtFToIV, ReadVFNCvtFToIV, ReadVMask]>; 594} 595 596multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 597 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 598 Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>; 599} 600 601multiclass VRED_MV_V<string opcodestr, bits<6> funct6> { 602 def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">, 603 Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV0, ReadVMask]>; 604} 605 606multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> { 607 def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">, 608 Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV0, ReadVMask]>; 609} 610 611multiclass VRED_FV_V<string opcodestr, bits<6> funct6> { 612 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 613 Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV0, ReadVMask]>; 614} 615 616multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> { 617 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 618 Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV0, ReadVMask]>; 619} 620 621multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> { 622 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 623 Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV0, ReadVMask]>; 624} 625 626multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> { 627 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 628 Sched<[WriteVFWRedOV, ReadVFWRedOV, ReadVFWRedOV0, ReadVMask]>; 629} 630 631multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> { 632 def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">, 633 Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>; 634} 635 636multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> { 637 def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>, 638 Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>; 639} 640 641multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> { 642 def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>, 643 Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>; 644} 645 646multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 647 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 648 Sched<[WriteVShiftV, ReadVShiftV, ReadVShiftV, ReadVMask]>; 649 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 650 Sched<[WriteVShiftX, ReadVShiftV, ReadVShiftX, ReadVMask]>; 651 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 652 Sched<[WriteVShiftI, ReadVShiftV, ReadVMask]>; 653} 654 655multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 656 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 657 Sched<[WriteVNShiftV, ReadVNShiftV, ReadVNShiftV, ReadVMask]>; 658 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 659 Sched<[WriteVNShiftX, ReadVNShiftV, ReadVNShiftX, ReadVMask]>; 660 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 661 Sched<[WriteVNShiftI, ReadVNShiftV, ReadVMask]>; 662} 663 664multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 665 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 666 Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>; 667 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 668 Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>; 669 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 670 Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>; 671} 672 673multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 674 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 675 Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpX, ReadVMask]>; 676 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 677 Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>; 678} 679 680multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 681 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 682 Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>; 683 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 684 Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>; 685} 686 687multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 688 def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 689 Sched<[WriteVIMulV, ReadVIMulV, ReadVIMulV, ReadVMask]>; 690 def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 691 Sched<[WriteVIMulX, ReadVIMulV, ReadVIMulX, ReadVMask]>; 692} 693 694multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 695 def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 696 Sched<[WriteVIWMulV, ReadVIWMulV, ReadVIWMulV, ReadVMask]>; 697 def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 698 Sched<[WriteVIWMulX, ReadVIWMulV, ReadVIWMulX, ReadVMask]>; 699} 700 701multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 702 def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 703 Sched<[WriteVIDivV, ReadVIDivV, ReadVIDivV, ReadVMask]>; 704 def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 705 Sched<[WriteVIDivX, ReadVIDivV, ReadVIDivX, ReadVMask]>; 706} 707 708multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 709 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 710 Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>; 711 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 712 Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>; 713 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 714 Sched<[WriteVSALUI, ReadVSALUV, ReadVMask]>; 715} 716 717multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 718 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 719 Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>; 720 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 721 Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>; 722} 723 724multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 725 def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 726 Sched<[WriteVAALUV, ReadVAALUV, ReadVAALUV, ReadVMask]>; 727 def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 728 Sched<[WriteVAALUX, ReadVAALUV, ReadVAALUX, ReadVMask]>; 729} 730 731multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> { 732 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 733 Sched<[WriteVSMulV, ReadVSMulV, ReadVSMulV, ReadVMask]>; 734 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 735 Sched<[WriteVSMulX, ReadVSMulV, ReadVSMulX, ReadVMask]>; 736} 737 738multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 739 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 740 Sched<[WriteVSShiftV, ReadVSShiftV, ReadVSShiftV, ReadVMask]>; 741 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 742 Sched<[WriteVSShiftX, ReadVSShiftV, ReadVSShiftX, ReadVMask]>; 743 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 744 Sched<[WriteVSShiftI, ReadVSShiftV, ReadVMask]>; 745} 746 747multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 748 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 749 Sched<[WriteVNClipV, ReadVNClipV, ReadVNClipV, ReadVMask]>; 750 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 751 Sched<[WriteVNClipX, ReadVNClipV, ReadVNClipX, ReadVMask]>; 752 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 753 Sched<[WriteVNClipI, ReadVNClipV, ReadVMask]>; 754} 755 756multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 757 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 758 Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideX, ReadVMask]>; 759 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 760 Sched<[WriteVISlideI, ReadVISlideV, ReadVMask]>; 761} 762 763multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> { 764 def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 765 Sched<[WriteVISlide1X, ReadVISlideV, ReadVISlideX, ReadVMask]>; 766} 767 768multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> { 769 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 770 Sched<[WriteVFSlide1F, ReadVFSlideV, ReadVFSlideF, ReadVMask]>; 771} 772 773multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> { 774 def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">, 775 Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV, ReadVMask]>; 776 def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">, 777 Sched<[WriteVGatherX, ReadVGatherV, ReadVGatherX, ReadVMask]>; 778 def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>, 779 Sched<[WriteVGatherI, ReadVGatherV, ReadVMask]>; 780} 781 782multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> { 783 def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">, 784 Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>; 785} 786 787multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> { 788 foreach l = [8, 16, 32] in { 789 defvar w = !cast<RISCVWidth>("LSWidth" # l); 790 defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R" # l); 791 792 def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>, 793 Sched<[s, ReadVLDX]>; 794 } 795} 796multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> { 797 def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>, 798 Sched<[schedrw, ReadVLDX]>; 799} 800 801//===----------------------------------------------------------------------===// 802// Instructions 803//===----------------------------------------------------------------------===// 804 805let Predicates = [HasVInstructions] in { 806let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in { 807def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei), 808 "vsetvli", "$rd, $rs1, $vtypei">; 809 810def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei), 811 "vsetivli", "$rd, $uimm, $vtypei">; 812 813def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), 814 "vsetvl", "$rd, $rs1, $rs2">; 815} // hasSideEffects = 1, mayLoad = 0, mayStore = 0 816foreach eew = [8, 16, 32] in { 817 defvar w = !cast<RISCVWidth>("LSWidth" # eew); 818 819 // Vector Unit-Stride Instructions 820 def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched<eew>; 821 def VSE#eew#_V : VUnitStrideStore<w, "vse"#eew#".v">, VSESched<eew>; 822 823 // Vector Unit-Stride Fault-only-First Loads 824 def VLE#eew#FF_V : VUnitStrideLoadFF<w, "vle"#eew#"ff.v">, VLFSched<eew>; 825 826 // Vector Strided Instructions 827 def VLSE#eew#_V : VStridedLoad<w, "vlse"#eew#".v">, VLSSched<eew>; 828 def VSSE#eew#_V : VStridedStore<w, "vsse"#eew#".v">, VSSSched<eew>; 829} 830 831defm "" : VIndexLoadStore<[8, 16, 32]>; 832} // Predicates = [HasVInstructions] 833 834let Predicates = [HasVInstructions] in { 835def VLM_V : VUnitStrideLoadMask<"vlm.v">, 836 Sched<[WriteVLDM, ReadVLDX]>; 837def VSM_V : VUnitStrideStoreMask<"vsm.v">, 838 Sched<[WriteVSTM, ReadVSTM, ReadVSTX]>; 839def : InstAlias<"vle1.v $vd, (${rs1})", 840 (VLM_V VR:$vd, GPR:$rs1), 0>; 841def : InstAlias<"vse1.v $vs3, (${rs1})", 842 (VSM_V VR:$vs3, GPR:$rs1), 0>; 843 844defm VL1R : VWholeLoadN<0, "vl1r", VR>; 845defm VL2R : VWholeLoadN<1, "vl2r", VRM2>; 846defm VL4R : VWholeLoadN<3, "vl4r", VRM4>; 847defm VL8R : VWholeLoadN<7, "vl8r", VRM8>; 848 849def VS1R_V : VWholeStore<0, "vs1r.v", VR>, 850 Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>; 851def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>, 852 Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>; 853def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>, 854 Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>; 855def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>, 856 Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>; 857 858def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>; 859def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>; 860def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>; 861def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>; 862} // Predicates = [HasVInstructions] 863 864let Predicates = [HasVInstructionsI64] in { 865// Vector Unit-Stride Instructions 866def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">, 867 VLESched<64>; 868 869def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">, 870 VLFSched<64>; 871 872def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">, 873 VSESched<64>; 874// Vector Strided Instructions 875def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">, 876 VLSSched<32>; 877 878def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">, 879 VSSSched<64>; 880 881defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R64>; 882defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R64>; 883defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R64>; 884defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R64>; 885} // Predicates = [HasVInstructionsI64] 886let Predicates = [IsRV64, HasVInstructionsI64] in { 887 // Vector Indexed Instructions 888 defm "" : VIndexLoadStore<[64]>; 889} // [IsRV64, HasVInstructionsI64] 890 891let Predicates = [HasVInstructions] in { 892// Vector Single-Width Integer Add and Subtract 893defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>; 894defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>; 895defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>; 896 897def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 898 899// Vector Widening Integer Add/Subtract 900// Refer to 11.2 Widening Vector Arithmetic Instructions 901// The destination vector register group cannot overlap a source vector 902// register group of a different element width (including the mask register 903// if masked), otherwise an illegal instruction exception is raised. 904let Constraints = "@earlyclobber $vd" in { 905let RVVConstraint = WidenV in { 906defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>; 907defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>; 908defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>; 909defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>; 910} // RVVConstraint = WidenV 911// Set earlyclobber for following instructions for second and mask operands. 912// This has the downside that the earlyclobber constraint is too coarse and 913// will impose unnecessary restrictions by not allowing the destination to 914// overlap with the first (wide) operand. 915let RVVConstraint = WidenW in { 916defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">; 917defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">; 918defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">; 919defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">; 920} // RVVConstraint = WidenW 921} // Constraints = "@earlyclobber $vd" 922 923def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm", 924 (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 925def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm", 926 (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 927 928// Vector Integer Extension 929defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>; 930defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>; 931defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>; 932defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>; 933defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>; 934defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>; 935 936// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions 937defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>; 938let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 939defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>; 940defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>; 941} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 942defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>; 943let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 944defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>; 945defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>; 946} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 947 948// Vector Bitwise Logical Instructions 949defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>; 950defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>; 951defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>; 952 953def : InstAlias<"vnot.v $vd, $vs$vm", 954 (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>; 955 956// Vector Single-Width Bit Shift Instructions 957defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>; 958defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000, uimm5>; 959defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001, uimm5>; 960 961// Vector Narrowing Integer Right Shift Instructions 962// Refer to 11.3. Narrowing Vector Arithmetic Instructions 963// The destination vector register group cannot overlap the first source 964// vector register group (specified by vs2). The destination vector register 965// group cannot overlap the mask register if used, unless LMUL=1. 966let Constraints = "@earlyclobber $vd" in { 967defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">; 968defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">; 969} // Constraints = "@earlyclobber $vd" 970 971def : InstAlias<"vncvt.x.x.w $vd, $vs$vm", 972 (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 973 974// Vector Integer Comparison Instructions 975let RVVConstraint = NoConstraint in { 976defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>; 977defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>; 978defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>; 979defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>; 980defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>; 981defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>; 982defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>; 983defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>; 984} // RVVConstraint = NoConstraint 985 986def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm", 987 (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 988def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm", 989 (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 990def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm", 991 (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 992def : InstAlias<"vmsge.vv $vd, $va, $vb$vm", 993 (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 994 995let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0, 996 mayStore = 0 in { 997// For unsigned comparisons we need to special case 0 immediate to maintain 998// the always true/false semantics we would invert if we just decremented the 999// immediate like we do for signed. To match the GNU assembler we will use 1000// vmseq/vmsne.vv with the same register for both operands which we can't do 1001// from an InstAlias. 1002def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd), 1003 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1004 [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">; 1005def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd), 1006 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1007 [], "vmsltu.vi", "$vd, $vs2, $imm$vm">; 1008// Handle signed with pseudos as well for more consistency in the 1009// implementation. 1010def PseudoVMSGE_VI : Pseudo<(outs VR:$vd), 1011 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1012 [], "vmsge.vi", "$vd, $vs2, $imm$vm">; 1013def PseudoVMSLT_VI : Pseudo<(outs VR:$vd), 1014 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1015 [], "vmslt.vi", "$vd, $vs2, $imm$vm">; 1016} 1017 1018let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0, 1019 mayStore = 0 in { 1020def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd), 1021 (ins VR:$vs2, GPR:$rs1), 1022 [], "vmsgeu.vx", "$vd, $vs2, $rs1">; 1023def PseudoVMSGE_VX : Pseudo<(outs VR:$vd), 1024 (ins VR:$vs2, GPR:$rs1), 1025 [], "vmsge.vx", "$vd, $vs2, $rs1">; 1026def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd), 1027 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1028 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">; 1029def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd), 1030 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1031 [], "vmsge.vx", "$vd, $vs2, $rs1$vm">; 1032def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch), 1033 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1034 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">; 1035def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch), 1036 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1037 [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">; 1038} 1039 1040// Vector Integer Min/Max Instructions 1041defm VMINU_V : VCMP_IV_V_X<"vminu", 0b000100>; 1042defm VMIN_V : VCMP_IV_V_X<"vmin", 0b000101>; 1043defm VMAXU_V : VCMP_IV_V_X<"vmaxu", 0b000110>; 1044defm VMAX_V : VCMP_IV_V_X<"vmax", 0b000111>; 1045 1046// Vector Single-Width Integer Multiply Instructions 1047defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>; 1048defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>; 1049defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>; 1050defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>; 1051 1052// Vector Integer Divide Instructions 1053defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>; 1054defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>; 1055defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>; 1056defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>; 1057 1058// Vector Widening Integer Multiply Instructions 1059let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { 1060defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>; 1061defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>; 1062defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>; 1063} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV 1064 1065// Vector Single-Width Integer Multiply-Add Instructions 1066defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>; 1067defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>; 1068defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>; 1069defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>; 1070 1071// Vector Widening Integer Multiply-Add Instructions 1072let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { 1073defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>; 1074defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>; 1075defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>; 1076defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>; 1077} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV 1078 1079// Vector Integer Merge Instructions 1080defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>; 1081 1082// Vector Integer Move Instructions 1083let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1, 1084 RVVConstraint = NoConstraint in { 1085// op vd, vs1 1086def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd), 1087 (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">, 1088 Sched<[WriteVIMovV, ReadVIMovV]>; 1089// op vd, rs1 1090def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd), 1091 (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">, 1092 Sched<[WriteVIMovX, ReadVIMovX]>; 1093// op vd, imm 1094def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd), 1095 (ins simm5:$imm), "vmv.v.i", "$vd, $imm">, 1096 Sched<[WriteVIMovI]>; 1097} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1098 1099// Vector Fixed-Point Arithmetic Instructions 1100defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>; 1101defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>; 1102defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>; 1103defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>; 1104 1105// Vector Single-Width Averaging Add and Subtract 1106defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>; 1107defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>; 1108defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>; 1109defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>; 1110 1111// Vector Single-Width Fractional Multiply with Rounding and Saturation 1112defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>; 1113 1114// Vector Single-Width Scaling Shift Instructions 1115defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010, uimm5>; 1116defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011, uimm5>; 1117 1118// Vector Narrowing Fixed-Point Clip Instructions 1119let Constraints = "@earlyclobber $vd" in { 1120defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">; 1121defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">; 1122} // Constraints = "@earlyclobber $vd" 1123} // Predicates = [HasVInstructions] 1124 1125let Predicates = [HasVInstructionsAnyF] in { 1126// Vector Single-Width Floating-Point Add/Subtract Instructions 1127defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>; 1128defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>; 1129defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>; 1130 1131// Vector Widening Floating-Point Add/Subtract Instructions 1132let Constraints = "@earlyclobber $vd" in { 1133let RVVConstraint = WidenV in { 1134defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000>; 1135defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010>; 1136} // RVVConstraint = WidenV 1137// Set earlyclobber for following instructions for second and mask operands. 1138// This has the downside that the earlyclobber constraint is too coarse and 1139// will impose unnecessary restrictions by not allowing the destination to 1140// overlap with the first (wide) operand. 1141let RVVConstraint = WidenW in { 1142defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">; 1143defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">; 1144} // RVVConstraint = WidenW 1145} // Constraints = "@earlyclobber $vd" 1146 1147// Vector Single-Width Floating-Point Multiply/Divide Instructions 1148defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>; 1149defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>; 1150defm VFRDIV_V : VRDIV_FV_F<"vfrdiv", 0b100001>; 1151 1152// Vector Widening Floating-Point Multiply 1153let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { 1154defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>; 1155} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV 1156 1157// Vector Single-Width Floating-Point Fused Multiply-Add Instructions 1158defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>; 1159defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>; 1160defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>; 1161defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>; 1162defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>; 1163defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>; 1164defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>; 1165defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>; 1166 1167// Vector Widening Floating-Point Fused Multiply-Add Instructions 1168let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { 1169defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>; 1170defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>; 1171defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>; 1172defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>; 1173} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV 1174 1175// Vector Floating-Point Square-Root Instruction 1176defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>; 1177defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>; 1178defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>; 1179 1180// Vector Floating-Point MIN/MAX Instructions 1181defm VFMIN_V : VCMP_FV_V_F<"vfmin", 0b000100>; 1182defm VFMAX_V : VCMP_FV_V_F<"vfmax", 0b000110>; 1183 1184// Vector Floating-Point Sign-Injection Instructions 1185defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>; 1186defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>; 1187defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>; 1188 1189def : InstAlias<"vfneg.v $vd, $vs$vm", 1190 (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>; 1191def : InstAlias<"vfabs.v $vd, $vs$vm", 1192 (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>; 1193 1194// Vector Floating-Point Compare Instructions 1195let RVVConstraint = NoConstraint in { 1196defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>; 1197defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>; 1198defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>; 1199defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>; 1200defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>; 1201defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>; 1202} // RVVConstraint = NoConstraint 1203 1204def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm", 1205 (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1206def : InstAlias<"vmfge.vv $vd, $va, $vb$vm", 1207 (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1208 1209// Vector Floating-Point Classify Instruction 1210defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>; 1211 1212let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 1213 1214// Vector Floating-Point Merge Instruction 1215let vm = 0 in 1216def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd), 1217 (ins VR:$vs2, FPR32:$rs1, VMV0:$v0), 1218 "vfmerge.vfm", "$vd, $vs2, $rs1, v0">, 1219 Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>; 1220 1221// Vector Floating-Point Move Instruction 1222let RVVConstraint = NoConstraint in 1223let vm = 1, vs2 = 0 in 1224def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd), 1225 (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">, 1226 Sched<[WriteVFMovV, ReadVFMovF]>; 1227 1228} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1229 1230// Single-Width Floating-Point/Integer Type-Convert Instructions 1231defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>; 1232defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>; 1233defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>; 1234defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>; 1235defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>; 1236defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>; 1237 1238// Widening Floating-Point/Integer Type-Convert Instructions 1239let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in { 1240defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>; 1241defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>; 1242defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>; 1243defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>; 1244defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>; 1245defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>; 1246defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>; 1247} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt 1248 1249// Narrowing Floating-Point/Integer Type-Convert Instructions 1250let Constraints = "@earlyclobber $vd" in { 1251defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>; 1252defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>; 1253defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>; 1254defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>; 1255defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>; 1256defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>; 1257defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>; 1258defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>; 1259} // Constraints = "@earlyclobber $vd" 1260} // Predicates = HasVInstructionsAnyF] 1261 1262let Predicates = [HasVInstructions] in { 1263 1264// Vector Single-Width Integer Reduction Instructions 1265let RVVConstraint = NoConstraint in { 1266defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>; 1267defm VREDMAXU : VRED_MV_V<"vredmaxu", 0b000110>; 1268defm VREDMAX : VRED_MV_V<"vredmax", 0b000111>; 1269defm VREDMINU : VRED_MV_V<"vredminu", 0b000100>; 1270defm VREDMIN : VRED_MV_V<"vredmin", 0b000101>; 1271defm VREDAND : VRED_MV_V<"vredand", 0b000001>; 1272defm VREDOR : VRED_MV_V<"vredor", 0b000010>; 1273defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>; 1274} // RVVConstraint = NoConstraint 1275 1276// Vector Widening Integer Reduction Instructions 1277let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 1278// Set earlyclobber for following instructions for second and mask operands. 1279// This has the downside that the earlyclobber constraint is too coarse and 1280// will impose unnecessary restrictions by not allowing the destination to 1281// overlap with the first (wide) operand. 1282defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>; 1283defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>; 1284} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 1285 1286} // Predicates = [HasVInstructions] 1287 1288let Predicates = [HasVInstructionsAnyF] in { 1289// Vector Single-Width Floating-Point Reduction Instructions 1290let RVVConstraint = NoConstraint in { 1291defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>; 1292defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>; 1293defm VFREDMAX : VRED_FV_V<"vfredmax", 0b000111>; 1294defm VFREDMIN : VRED_FV_V<"vfredmin", 0b000101>; 1295} // RVVConstraint = NoConstraint 1296 1297def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm", 1298 (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>; 1299 1300// Vector Widening Floating-Point Reduction Instructions 1301let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 1302// Set earlyclobber for following instructions for second and mask operands. 1303// This has the downside that the earlyclobber constraint is too coarse and 1304// will impose unnecessary restrictions by not allowing the destination to 1305// overlap with the first (wide) operand. 1306defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>; 1307defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>; 1308} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 1309 1310def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm", 1311 (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>; 1312} // Predicates = [HasVInstructionsAnyF] 1313 1314let Predicates = [HasVInstructions] in { 1315// Vector Mask-Register Logical Instructions 1316let RVVConstraint = NoConstraint in { 1317defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">; 1318defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">; 1319defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">; 1320defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">; 1321defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">; 1322defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">; 1323defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">; 1324defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">; 1325} 1326 1327def : InstAlias<"vmmv.m $vd, $vs", 1328 (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>; 1329def : InstAlias<"vmclr.m $vd", 1330 (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>; 1331def : InstAlias<"vmset.m $vd", 1332 (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>; 1333def : InstAlias<"vmnot.m $vd, $vs", 1334 (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>; 1335 1336def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1", 1337 (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>; 1338def : InstAlias<"vmornot.mm $vd, $vs2, $vs1", 1339 (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>; 1340 1341let hasSideEffects = 0, mayLoad = 0, mayStore = 0, 1342 RVVConstraint = NoConstraint in { 1343 1344// Vector mask population count vcpop 1345def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd), 1346 (ins VR:$vs2, VMaskOp:$vm), 1347 "vcpop.m", "$vd, $vs2$vm">, 1348 Sched<[WriteVMPopV, ReadVMPopV, ReadVMask]>; 1349 1350// vfirst find-first-set mask bit 1351def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd), 1352 (ins VR:$vs2, VMaskOp:$vm), 1353 "vfirst.m", "$vd, $vs2$vm">, 1354 Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMask]>; 1355 1356} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1357 1358def : InstAlias<"vpopc.m $vd, $vs2$vm", 1359 (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>; 1360 1361let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in { 1362 1363// vmsbf.m set-before-first mask bit 1364defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>; 1365// vmsif.m set-including-first mask bit 1366defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>; 1367// vmsof.m set-only-first mask bit 1368defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>; 1369// Vector Iota Instruction 1370defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>; 1371 1372} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota 1373 1374// Vector Element Index Instruction 1375let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 1376 1377let vs2 = 0 in 1378def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd), 1379 (ins VMaskOp:$vm), "vid.v", "$vd$vm">, 1380 Sched<[WriteVMIdxV, ReadVMask]>; 1381 1382// Integer Scalar Move Instructions 1383let vm = 1, RVVConstraint = NoConstraint in { 1384def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd), 1385 (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">, 1386 Sched<[WriteVIMovVX, ReadVIMovVX]>; 1387let Constraints = "$vd = $vd_wb" in 1388def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb), 1389 (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">, 1390 Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>; 1391} 1392 1393} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1394 1395} // Predicates = [HasVInstructions] 1396 1397let Predicates = [HasVInstructionsAnyF] in { 1398 1399let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1, 1400 RVVConstraint = NoConstraint in { 1401// Floating-Point Scalar Move Instructions 1402def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd), 1403 (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">, 1404 Sched<[WriteVFMovVF, ReadVFMovVF]>; 1405let Constraints = "$vd = $vd_wb" in 1406def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb), 1407 (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">, 1408 Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>; 1409 1410} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 1411 1412} // Predicates = [HasVInstructionsAnyF] 1413 1414let Predicates = [HasVInstructions] in { 1415// Vector Slide Instructions 1416let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { 1417defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, uimm5>; 1418defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>; 1419} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp 1420defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, uimm5>; 1421defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>; 1422} // Predicates = [HasVInstructions] 1423 1424let Predicates = [HasVInstructionsAnyF] in { 1425let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { 1426defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>; 1427} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp 1428defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>; 1429} // Predicates = [HasVInstructionsAnyF] 1430 1431let Predicates = [HasVInstructions] in { 1432// Vector Register Gather Instruction 1433let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in { 1434defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100, uimm5>; 1435def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">, 1436 Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV]>; 1437} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather 1438 1439// Vector Compress Instruction 1440let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in { 1441defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>; 1442} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress 1443 1444let hasSideEffects = 0, mayLoad = 0, mayStore = 0, 1445 RVVConstraint = NoConstraint in { 1446def VMV1R_V : RVInstV<0b100111, 0, OPIVI, (outs VR:$vd), (ins VR:$vs2), 1447 "vmv1r.v", "$vd, $vs2">, VMVRSched<1> { 1448 let Uses = []; 1449 let vm = 1; 1450} 1451// A future extension may relax the vector register alignment restrictions. 1452foreach n = [2, 4, 8] in { 1453 defvar vrc = !cast<VReg>("VRM"#n); 1454 def VMV#n#R_V : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd), 1455 (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">, 1456 VMVRSched<n> { 1457 let Uses = []; 1458 let vm = 1; 1459 } 1460} 1461} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1462} // Predicates = [HasVInstructions] 1463 1464let Predicates = [HasVInstructions] in { 1465 foreach nf=2-8 in { 1466 foreach eew = [8, 16, 32] in { 1467 defvar w = !cast<RISCVWidth>("LSWidth"#eew); 1468 1469 def VLSEG#nf#E#eew#_V : 1470 VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">; 1471 def VLSEG#nf#E#eew#FF_V : 1472 VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">; 1473 def VSSEG#nf#E#eew#_V : 1474 VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">; 1475 1476 // Vector Strided Instructions 1477 def VLSSEG#nf#E#eew#_V : 1478 VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">; 1479 def VSSSEG#nf#E#eew#_V : 1480 VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">; 1481 1482 // Vector Indexed Instructions 1483 def VLUXSEG#nf#EI#eew#_V : 1484 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w, 1485 "vluxseg"#nf#"ei"#eew#".v">; 1486 def VLOXSEG#nf#EI#eew#_V : 1487 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w, 1488 "vloxseg"#nf#"ei"#eew#".v">; 1489 def VSUXSEG#nf#EI#eew#_V : 1490 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w, 1491 "vsuxseg"#nf#"ei"#eew#".v">; 1492 def VSOXSEG#nf#EI#eew#_V : 1493 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w, 1494 "vsoxseg"#nf#"ei"#eew#".v">; 1495 } 1496 } 1497} // Predicates = [HasVInstructions] 1498 1499let Predicates = [HasVInstructionsI64] in { 1500 foreach nf=2-8 in { 1501 // Vector Unit-strided Segment Instructions 1502 def VLSEG#nf#E64_V : 1503 VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">; 1504 def VLSEG#nf#E64FF_V : 1505 VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">; 1506 def VSSEG#nf#E64_V : 1507 VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">; 1508 1509 // Vector Strided Segment Instructions 1510 def VLSSEG#nf#E64_V : 1511 VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">; 1512 def VSSSEG#nf#E64_V : 1513 VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">; 1514 } 1515} // Predicates = [HasVInstructionsI64] 1516let Predicates = [HasVInstructionsI64, IsRV64] in { 1517 foreach nf=2-8 in { 1518 // Vector Indexed Segment Instructions 1519 def VLUXSEG#nf#EI64_V : 1520 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64, 1521 "vluxseg"#nf#"ei64.v">; 1522 def VLOXSEG#nf#EI64_V : 1523 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64, 1524 "vloxseg"#nf#"ei64.v">; 1525 def VSUXSEG#nf#EI64_V : 1526 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64, 1527 "vsuxseg"#nf#"ei64.v">; 1528 def VSOXSEG#nf#EI64_V : 1529 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64, 1530 "vsoxseg"#nf#"ei64.v">; 1531 } 1532} // Predicates = [HasVInstructionsI64, IsRV64] 1533 1534include "RISCVInstrInfoVPseudos.td" 1535