1//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file describes the RISC-V instructions from the standard 'V' Vector 10/// extension, version 1.0. 11/// 12//===----------------------------------------------------------------------===// 13 14include "RISCVInstrFormatsV.td" 15 16//===----------------------------------------------------------------------===// 17// Operand and SDNode transformation definitions. 18//===----------------------------------------------------------------------===// 19 20class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass { 21 let Name = "VTypeI" # VTypeINum; 22 let ParserMethod = "parseVTypeI"; 23 let DiagnosticType = "InvalidVTypeI"; 24 let RenderMethod = "addVTypeIOperands"; 25} 26 27class VTypeIOp<int VTypeINum> : RISCVOp { 28 let ParserMatchClass = VTypeIAsmOperand<VTypeINum>; 29 let PrintMethod = "printVTypeI"; 30 let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">"; 31 let OperandType = "OPERAND_VTYPEI" # VTypeINum; 32 let MCOperandPredicate = [{ 33 int64_t Imm; 34 if (MCOp.evaluateAsConstantImm(Imm)) 35 return isUInt<VTypeINum>(Imm); 36 return MCOp.isBareSymbolRef(); 37 }]; 38} 39 40def VTypeIOp10 : VTypeIOp<10>; 41def VTypeIOp11 : VTypeIOp<11>; 42 43def VMaskAsmOperand : AsmOperandClass { 44 let Name = "RVVMaskRegOpOperand"; 45 let RenderMethod = "addRegOperands"; 46 let PredicateMethod = "isV0Reg"; 47 let ParserMethod = "parseMaskReg"; 48 let IsOptional = 1; 49 let DefaultMethod = "defaultMaskRegOp"; 50 let DiagnosticType = "InvalidVMaskRegister"; 51} 52 53def VMaskOp : RegisterOperand<VMV0> { 54 let ParserMatchClass = VMaskAsmOperand; 55 let PrintMethod = "printVMaskReg"; 56 let EncoderMethod = "getVMaskReg"; 57 let DecoderMethod = "decodeVMaskReg"; 58} 59 60def simm5 : RISCVSImmLeafOp<5> { 61 let MCOperandPredicate = [{ 62 int64_t Imm; 63 if (MCOp.evaluateAsConstantImm(Imm)) 64 return isInt<5>(Imm); 65 return MCOp.isBareSymbolRef(); 66 }]; 67} 68 69def SImm5Plus1AsmOperand : AsmOperandClass { 70 let Name = "SImm5Plus1"; 71 let RenderMethod = "addImmOperands"; 72 let DiagnosticType = "InvalidSImm5Plus1"; 73} 74 75def simm5_plus1 : RISCVOp, ImmLeaf<XLenVT, 76 [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> { 77 let ParserMatchClass = SImm5Plus1AsmOperand; 78 let OperandType = "OPERAND_SIMM5_PLUS1"; 79 let MCOperandPredicate = [{ 80 int64_t Imm; 81 if (MCOp.evaluateAsConstantImm(Imm)) 82 return (isInt<5>(Imm) && Imm != -16) || Imm == 16; 83 return MCOp.isBareSymbolRef(); 84 }]; 85} 86 87def simm5_plus1_nonzero : ImmLeaf<XLenVT, 88 [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>; 89 90//===----------------------------------------------------------------------===// 91// Scheduling definitions. 92//===----------------------------------------------------------------------===// 93 94// Common class of scheduling definitions. 95// `ReadVMergeOp` will be prepended to reads if instruction is masked. 96// `ReadVMask` will be appended to reads if instruction is masked. 97// Operands: 98// `writes` SchedWrites that are listed for each explicit def operand 99// in order. 100// `reads` SchedReads that are listed for each explicit use operand. 101// `forceMasked` Forced to be masked (e.g. Add-with-Carry Instructions). 102// `forceMergeOpRead` Force to have read for merge operand. 103class SchedCommon<list<SchedWrite> writes, list<SchedRead> reads, 104 string mx = "WorstCase", int sew = 0, bit forceMasked = 0, 105 bit forceMergeOpRead = 0> : Sched<[]> { 106 defvar isMasked = !ne(!find(NAME, "_MASK"), -1); 107 defvar isMaskedOrForceMasked = !or(forceMasked, isMasked); 108 defvar mergeRead = !if(!or(!eq(mx, "WorstCase"), !eq(sew, 0)), 109 !cast<SchedRead>("ReadVMergeOp_" # mx), 110 !cast<SchedRead>("ReadVMergeOp_" # mx # "_E" #sew)); 111 defvar needsMergeRead = !or(isMaskedOrForceMasked, forceMergeOpRead); 112 defvar readsWithMask = 113 !if(isMaskedOrForceMasked, !listconcat(reads, [ReadVMask]), reads); 114 defvar allReads = 115 !if(needsMergeRead, !listconcat([mergeRead], readsWithMask), reads); 116 let SchedRW = !listconcat(writes, allReads); 117} 118 119// Common class of scheduling definitions for n-ary instructions. 120// The scheudling resources are relevant to LMUL and may be relevant to SEW. 121class SchedNary<string write, list<string> reads, string mx, int sew = 0, 122 bit forceMasked = 0, bit forceMergeOpRead = 0> 123 : SchedCommon<[!cast<SchedWrite>( 124 !if(sew, 125 write # "_" # mx # "_E" # sew, 126 write # "_" # mx))], 127 !foreach(read, reads, 128 !cast<SchedRead>(!if(sew, read #"_" #mx #"_E" #sew, 129 read #"_" #mx))), 130 mx, sew, forceMasked, forceMergeOpRead>; 131 132// Classes with postfix "MC" are only used in MC layer. 133// For these classes, we assume that they are with the worst case costs and 134// `ReadVMask` is always needed (with some exceptions). 135 136// For instructions with no operand. 137class SchedNullary<string write, string mx, int sew = 0, bit forceMasked = 0, 138 bit forceMergeOpRead = 0>: 139 SchedNary<write, [], mx, sew, forceMasked, forceMergeOpRead>; 140class SchedNullaryMC<string write, bit forceMasked = 1>: 141 SchedNullary<write, "WorstCase", forceMasked=forceMasked>; 142 143// For instructions with one operand. 144class SchedUnary<string write, string read0, string mx, int sew = 0, 145 bit forceMasked = 0, bit forceMergeOpRead = 0>: 146 SchedNary<write, [read0], mx, sew, forceMasked, forceMergeOpRead>; 147class SchedUnaryMC<string write, string read0, bit forceMasked = 1>: 148 SchedUnary<write, read0, "WorstCase", forceMasked=forceMasked>; 149 150// For instructions with two operands. 151class SchedBinary<string write, string read0, string read1, string mx, 152 int sew = 0, bit forceMasked = 0, bit forceMergeOpRead = 0> 153 : SchedNary<write, [read0, read1], mx, sew, forceMasked, forceMergeOpRead>; 154class SchedBinaryMC<string write, string read0, string read1, 155 bit forceMasked = 1>: 156 SchedBinary<write, read0, read1, "WorstCase", forceMasked=forceMasked>; 157 158// For instructions with three operands. 159class SchedTernary<string write, string read0, string read1, string read2, 160 string mx, int sew = 0, bit forceMasked = 0, 161 bit forceMergeOpRead = 0> 162 : SchedNary<write, [read0, read1, read2], mx, sew, forceMasked, 163 forceMergeOpRead>; 164class SchedTernaryMC<string write, string read0, string read1, string read2, 165 int sew = 0, bit forceMasked = 1>: 166 SchedNary<write, [read0, read1, read2], "WorstCase", sew, forceMasked>; 167 168// For reduction instructions. 169class SchedReduction<string write, string read, string mx, int sew, 170 bit forceMergeOpRead = 0> 171 : SchedCommon<[!cast<SchedWrite>(write #"_" #mx #"_E" #sew)], 172 !listsplat(!cast<SchedRead>(read), 3), mx, sew, forceMergeOpRead>; 173class SchedReductionMC<string write, string readV, string readV0>: 174 SchedCommon<[!cast<SchedWrite>(write # "_WorstCase")], 175 [!cast<SchedRead>(readV), !cast<SchedRead>(readV0)], 176 forceMasked=1>; 177 178// Whole Vector Register Move 179class VMVRSched<int n> : SchedCommon< 180 [!cast<SchedWrite>("WriteVMov" # n # "V")], 181 [!cast<SchedRead>("ReadVMov" # n # "V")] 182>; 183 184// Vector Unit-Stride Loads and Stores 185class VLESched<string lmul, bit forceMasked = 0> : SchedCommon< 186 [!cast<SchedWrite>("WriteVLDE_" # lmul)], 187 [ReadVLDX], mx=lmul, forceMasked=forceMasked 188>; 189class VLESchedMC : VLESched<"WorstCase", forceMasked=1>; 190 191class VSESched<string lmul, bit forceMasked = 0> : SchedCommon< 192 [!cast<SchedWrite>("WriteVSTE_" # lmul)], 193 [!cast<SchedRead>("ReadVSTEV_" # lmul), ReadVSTX], mx=lmul, 194 forceMasked=forceMasked 195>; 196class VSESchedMC : VSESched<"WorstCase", forceMasked=1>; 197 198// Vector Strided Loads and Stores 199class VLSSched<int eew, string emul, bit forceMasked = 0> : SchedCommon< 200 [!cast<SchedWrite>("WriteVLDS" # eew # "_" # emul)], 201 [ReadVLDX, ReadVLDSX], emul, eew, forceMasked 202>; 203class VLSSchedMC<int eew> : VLSSched<eew, "WorstCase", forceMasked=1>; 204 205class VSSSched<int eew, string emul, bit forceMasked = 0> : SchedCommon< 206 [!cast<SchedWrite>("WriteVSTS" # eew # "_" # emul)], 207 [!cast<SchedRead>("ReadVSTS" # eew # "V_" # emul), ReadVSTX, ReadVSTSX], 208 emul, eew, forceMasked 209>; 210class VSSSchedMC<int eew> : VSSSched<eew, "WorstCase", forceMasked=1>; 211 212// Vector Indexed Loads and Stores 213class VLXSched<int dataEEW, bit isOrdered, string dataEMUL, string idxEMUL, 214 bit forceMasked = 0> : SchedCommon< 215 [!cast<SchedWrite>("WriteVLD" # !if(isOrdered, "O", "U") # "X" # dataEEW # "_" # dataEMUL)], 216 [ReadVLDX, !cast<SchedRead>("ReadVLD" # !if(isOrdered, "O", "U") # "XV_" # idxEMUL)], 217 dataEMUL, dataEEW, forceMasked 218>; 219class VLXSchedMC<int dataEEW, bit isOrdered>: 220 VLXSched<dataEEW, isOrdered, "WorstCase", "WorstCase", forceMasked=1>; 221 222class VSXSched<int dataEEW, bit isOrdered, string dataEMUL, string idxEMUL, 223 bit forceMasked = 0> : SchedCommon< 224 [!cast<SchedWrite>("WriteVST" # !if(isOrdered, "O", "U") # "X" # dataEEW # "_" # dataEMUL)], 225 [!cast<SchedRead>("ReadVST" # !if(isOrdered, "O", "U") #"X" # dataEEW # "_" # dataEMUL), 226 ReadVSTX, !cast<SchedRead>("ReadVST" # !if(isOrdered, "O", "U") # "XV_" # idxEMUL)], 227 dataEMUL, dataEEW, forceMasked 228>; 229class VSXSchedMC<int dataEEW, bit isOrdered>: 230 VSXSched<dataEEW, isOrdered, "WorstCase", "WorstCase", forceMasked=1>; 231 232// Unit-stride Fault-Only-First Loads 233class VLFSched<string lmul, bit forceMasked = 0> : SchedCommon< 234 [!cast<SchedWrite>("WriteVLDFF_" # lmul)], 235 [ReadVLDX], mx=lmul, forceMasked=forceMasked 236>; 237class VLFSchedMC: VLFSched<"WorstCase", forceMasked=1>; 238 239// Unit-Stride Segment Loads and Stores 240class VLSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon< 241 [!cast<SchedWrite>("WriteVLSEG" #nf #"e" #eew #"_" #emul)], 242 [ReadVLDX], emul, eew, forceMasked 243>; 244class VLSEGSchedMC<int nf, int eew> : VLSEGSched<nf, eew, "WorstCase", 245 forceMasked=1>; 246 247class VSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon< 248 [!cast<SchedWrite>("WriteVSSEG" # nf # "e" # eew # "_" # emul)], 249 [!cast<SchedRead>("ReadVSTEV_" #emul), ReadVSTX], emul, eew, forceMasked 250>; 251class VSSEGSchedMC<int nf, int eew> : VSSEGSched<nf, eew, "WorstCase", 252 forceMasked=1>; 253 254class VLSEGFFSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon< 255 [!cast<SchedWrite>("WriteVLSEGFF" # nf # "e" # eew # "_" # emul)], 256 [ReadVLDX], emul, eew, forceMasked 257>; 258class VLSEGFFSchedMC<int nf, int eew> : VLSEGFFSched<nf, eew, "WorstCase", 259 forceMasked=1>; 260 261// Strided Segment Loads and Stores 262class VLSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon< 263 [!cast<SchedWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #emul)], 264 [ReadVLDX, ReadVLDSX], emul, eew, forceMasked 265>; 266class VLSSEGSchedMC<int nf, int eew> : VLSSEGSched<nf, eew, "WorstCase", 267 forceMasked=1>; 268 269class VSSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon< 270 [!cast<SchedWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #emul)], 271 [!cast<SchedRead>("ReadVSTS" #eew #"V_" #emul), 272 ReadVSTX, ReadVSTSX], emul, eew, forceMasked 273>; 274class VSSSEGSchedMC<int nf, int eew> : VSSSEGSched<nf, eew, "WorstCase", 275 forceMasked=1>; 276 277// Indexed Segment Loads and Stores 278class VLXSEGSched<int nf, int eew, bit isOrdered, string emul, 279 bit forceMasked = 0> : SchedCommon< 280 [!cast<SchedWrite>("WriteVL" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)], 281 [ReadVLDX, !cast<SchedRead>("ReadVLD" #!if(isOrdered, "O", "U") #"XV_" #emul)], 282 emul, eew, forceMasked 283>; 284class VLXSEGSchedMC<int nf, int eew, bit isOrdered>: 285 VLXSEGSched<nf, eew, isOrdered, "WorstCase", forceMasked=1>; 286 287// Passes sew=0 instead of eew=0 since this pseudo does not follow MX_E form. 288class VSXSEGSched<int nf, int eew, bit isOrdered, string emul, 289 bit forceMasked = 0> : SchedCommon< 290 [!cast<SchedWrite>("WriteVS" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)], 291 [!cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"X" #eew #"_" #emul), 292 ReadVSTX, !cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"XV_" #emul)], 293 emul, sew=0, forceMasked=forceMasked 294>; 295class VSXSEGSchedMC<int nf, int eew, bit isOrdered>: 296 VSXSEGSched<nf, eew, isOrdered, "WorstCase", forceMasked=1>; 297 298//===----------------------------------------------------------------------===// 299// Instruction class templates 300//===----------------------------------------------------------------------===// 301 302let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in { 303// unit-stride load vd, (rs1), vm 304class VUnitStrideLoad<RISCVWidth width, string opcodestr> 305 : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0}, 306 (outs VR:$vd), 307 (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">; 308 309let vm = 1, RVVConstraint = NoConstraint in { 310// unit-stride whole register load vl<nf>r.v vd, (rs1) 311class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC> 312 : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg, 313 width.Value{2-0}, (outs VRC:$vd), (ins GPRMemZeroOffset:$rs1), 314 opcodestr, "$vd, $rs1"> { 315 let Uses = []; 316} 317 318// unit-stride mask load vd, (rs1) 319class VUnitStrideLoadMask<string opcodestr> 320 : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0}, 321 (outs VR:$vd), 322 (ins GPRMemZeroOffset:$rs1), opcodestr, "$vd, $rs1">; 323} // vm = 1, RVVConstraint = NoConstraint 324 325// unit-stride fault-only-first load vd, (rs1), vm 326class VUnitStrideLoadFF<RISCVWidth width, string opcodestr> 327 : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0}, 328 (outs VR:$vd), 329 (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">; 330 331// strided load vd, (rs1), rs2, vm 332class VStridedLoad<RISCVWidth width, string opcodestr> 333 : RVInstVLS<0b000, width.Value{3}, width.Value{2-0}, 334 (outs VR:$vd), 335 (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, 336 "$vd, $rs1, $rs2$vm">; 337 338// indexed load vd, (rs1), vs2, vm 339class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr> 340 : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0}, 341 (outs VR:$vd), 342 (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr, 343 "$vd, $rs1, $vs2$vm">; 344 345// unit-stride segment load vd, (rs1), vm 346class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr> 347 : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0}, 348 (outs VR:$vd), 349 (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">; 350 351// segment fault-only-first load vd, (rs1), vm 352class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr> 353 : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0}, 354 (outs VR:$vd), 355 (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">; 356 357// strided segment load vd, (rs1), rs2, vm 358class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr> 359 : RVInstVLS<nf, width.Value{3}, width.Value{2-0}, 360 (outs VR:$vd), 361 (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, 362 "$vd, $rs1, $rs2$vm">; 363 364// indexed segment load vd, (rs1), vs2, vm 365class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width, 366 string opcodestr> 367 : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0}, 368 (outs VR:$vd), 369 (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr, 370 "$vd, $rs1, $vs2$vm">; 371} // hasSideEffects = 0, mayLoad = 1, mayStore = 0 372 373let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in { 374// unit-stride store vd, vs3, (rs1), vm 375class VUnitStrideStore<RISCVWidth width, string opcodestr> 376 : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0}, 377 (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, 378 "$vs3, ${rs1}$vm">; 379 380let vm = 1 in { 381// vs<nf>r.v vd, (rs1) 382class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC> 383 : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg, 384 0b000, (outs), (ins VRC:$vs3, GPRMemZeroOffset:$rs1), 385 opcodestr, "$vs3, $rs1"> { 386 let Uses = []; 387} 388 389// unit-stride mask store vd, vs3, (rs1) 390class VUnitStrideStoreMask<string opcodestr> 391 : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0}, 392 (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1), opcodestr, 393 "$vs3, $rs1">; 394} // vm = 1 395 396// strided store vd, vs3, (rs1), rs2, vm 397class VStridedStore<RISCVWidth width, string opcodestr> 398 : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs), 399 (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), 400 opcodestr, "$vs3, $rs1, $rs2$vm">; 401 402// indexed store vd, vs3, (rs1), vs2, vm 403class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr> 404 : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs), 405 (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), 406 opcodestr, "$vs3, $rs1, $vs2$vm">; 407 408// segment store vd, vs3, (rs1), vm 409class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr> 410 : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0}, 411 (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, 412 "$vs3, ${rs1}$vm">; 413 414// segment store vd, vs3, (rs1), rs2, vm 415class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr> 416 : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs), 417 (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), 418 opcodestr, "$vs3, $rs1, $rs2$vm">; 419 420// segment store vd, vs3, (rs1), vs2, vm 421class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width, 422 string opcodestr> 423 : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs), 424 (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), 425 opcodestr, "$vs3, $rs1, $vs2$vm">; 426} // hasSideEffects = 0, mayLoad = 0, mayStore = 1 427 428let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 429// op vd, vs2, vs1, vm 430class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr> 431 : RVInstVV<funct6, opv, (outs VR:$vd), 432 (ins VR:$vs2, VR:$vs1, VMaskOp:$vm), 433 opcodestr, "$vd, $vs2, $vs1$vm">; 434 435// op vd, vs2, vs1, v0 (without mask, use v0 as carry input) 436class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr> 437 : RVInstVV<funct6, opv, (outs VR:$vd), 438 (ins VR:$vs2, VR:$vs1, VMV0:$v0), 439 opcodestr, "$vd, $vs2, $vs1, v0"> { 440 let vm = 0; 441} 442 443// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2) 444class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr, 445 bit EarlyClobber = 0> 446 : RVInstVV<funct6, opv, (outs VR:$vd_wb), 447 (ins VR:$vd, VR:$vs1, VR:$vs2, VMaskOp:$vm), 448 opcodestr, "$vd, $vs1, $vs2$vm"> { 449 let Constraints = !if(EarlyClobber, "@earlyclobber $vd_wb, $vd = $vd_wb", 450 "$vd = $vd_wb"); 451} 452 453// op vd, vs2, vs1 454class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr> 455 : RVInstVV<funct6, opv, (outs VR:$vd), 456 (ins VR:$vs2, VR:$vs1), 457 opcodestr, "$vd, $vs2, $vs1"> { 458 let vm = 1; 459} 460 461// op vd, vs2, rs1, vm 462class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr> 463 : RVInstVX<funct6, opv, (outs VR:$vd), 464 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 465 opcodestr, "$vd, $vs2, $rs1$vm">; 466 467// op vd, vs2, rs1, v0 (without mask, use v0 as carry input) 468class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr> 469 : RVInstVX<funct6, opv, (outs VR:$vd), 470 (ins VR:$vs2, GPR:$rs1, VMV0:$v0), 471 opcodestr, "$vd, $vs2, $rs1, v0"> { 472 let vm = 0; 473} 474 475// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2) 476class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr, 477 bit EarlyClobber = 0> 478 : RVInstVX<funct6, opv, (outs VR:$vd_wb), 479 (ins VR:$vd, GPR:$rs1, VR:$vs2, VMaskOp:$vm), 480 opcodestr, "$vd, $rs1, $vs2$vm"> { 481 let Constraints = !if(EarlyClobber, "@earlyclobber $vd_wb, $vd = $vd_wb", 482 "$vd = $vd_wb"); 483} 484 485// op vd, vs1, vs2 486class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr> 487 : RVInstVX<funct6, opv, (outs VR:$vd), 488 (ins VR:$vs2, GPR:$rs1), 489 opcodestr, "$vd, $vs2, $rs1"> { 490 let vm = 1; 491} 492 493// op vd, vs2, imm, vm 494class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5> 495 : RVInstIVI<funct6, (outs VR:$vd), 496 (ins VR:$vs2, optype:$imm, VMaskOp:$vm), 497 opcodestr, "$vd, $vs2, $imm$vm">; 498 499// op vd, vs2, imm, v0 (without mask, use v0 as carry input) 500class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5> 501 : RVInstIVI<funct6, (outs VR:$vd), 502 (ins VR:$vs2, optype:$imm, VMV0:$v0), 503 opcodestr, "$vd, $vs2, $imm, v0"> { 504 let vm = 0; 505} 506 507// op vd, vs2, imm, vm 508class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5> 509 : RVInstIVI<funct6, (outs VR:$vd), 510 (ins VR:$vs2, optype:$imm), 511 opcodestr, "$vd, $vs2, $imm"> { 512 let vm = 1; 513} 514 515// op vd, vs2, rs1, vm (Float) 516class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr> 517 : RVInstVX<funct6, opv, (outs VR:$vd), 518 (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm), 519 opcodestr, "$vd, $vs2, $rs1$vm">; 520 521// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2) 522class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr, 523 bit EarlyClobber = 0> 524 : RVInstVX<funct6, opv, (outs VR:$vd_wb), 525 (ins VR:$vd, FPR32:$rs1, VR:$vs2, VMaskOp:$vm), 526 opcodestr, "$vd, $rs1, $vs2$vm"> { 527 let Constraints = !if(EarlyClobber, "@earlyclobber $vd_wb, $vd = $vd_wb", 528 "$vd = $vd_wb"); 529} 530 531// op vd, vs2, vm (use vs1 as instruction encoding) 532class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr> 533 : RVInstV<funct6, vs1, opv, (outs VR:$vd), 534 (ins VR:$vs2, VMaskOp:$vm), 535 opcodestr, "$vd, $vs2$vm">; 536 537// op vd, vs2 (use vs1 as instruction encoding) 538class VALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr> 539 : RVInstV<funct6, vs1, opv, (outs VR:$vd), 540 (ins VR:$vs2), opcodestr, 541 "$vd, $vs2"> { 542 let vm = 1; 543} 544} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 545 546//===----------------------------------------------------------------------===// 547// Combination of instruction classes. 548// Use these multiclasses to define instructions more easily. 549//===----------------------------------------------------------------------===// 550 551multiclass VIndexLoadStore<int eew> { 552 defvar w = !cast<RISCVWidth>("LSWidth" # eew); 553 554 def VLUXEI # eew # _V : 555 VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # eew # ".v">, 556 VLXSchedMC<eew, isOrdered=0>; 557 def VLOXEI # eew # _V : 558 VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # eew # ".v">, 559 VLXSchedMC<eew, isOrdered=1>; 560 561 def VSUXEI # eew # _V : 562 VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # eew # ".v">, 563 VSXSchedMC<eew, isOrdered=0>; 564 def VSOXEI # eew # _V : 565 VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # eew # ".v">, 566 VSXSchedMC<eew, isOrdered=1>; 567} 568 569multiclass VALU_IV_V<string opcodestr, bits<6> funct6> { 570 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 571 SchedBinaryMC<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV">; 572} 573 574multiclass VALU_IV_X<string opcodestr, bits<6> funct6> { 575 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 576 SchedBinaryMC<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX">; 577} 578 579multiclass VALU_IV_I<string opcodestr, bits<6> funct6> { 580 def I : VALUVI<funct6, opcodestr # ".vi", simm5>, 581 SchedUnaryMC<"WriteVIALUI", "ReadVIALUV">; 582} 583 584multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6> 585 : VALU_IV_V<opcodestr, funct6>, 586 VALU_IV_X<opcodestr, funct6>, 587 VALU_IV_I<opcodestr, funct6>; 588 589multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6> 590 : VALU_IV_V<opcodestr, funct6>, 591 VALU_IV_X<opcodestr, funct6>; 592 593multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6> 594 : VALU_IV_X<opcodestr, funct6>, 595 VALU_IV_I<opcodestr, funct6>; 596 597multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw> { 598 def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">, 599 SchedBinaryMC<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV">; 600 def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">, 601 SchedBinaryMC<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX">; 602} 603 604multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6> { 605 def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv">, 606 SchedTernaryMC<"WriteVIMulAddV", "ReadVIMulAddV", "ReadVIMulAddV", 607 "ReadVIMulAddV">; 608 def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">, 609 SchedTernaryMC<"WriteVIMulAddX", "ReadVIMulAddV", "ReadVIMulAddX", 610 "ReadVIMulAddV">; 611} 612 613multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6> { 614 let RVVConstraint = WidenV in 615 def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">, 616 SchedTernaryMC<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX", 617 "ReadVIWMulAddV">; 618} 619 620multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6> 621 : VWMAC_MV_X<opcodestr, funct6> { 622 let RVVConstraint = WidenV in 623 def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv", EarlyClobber=1>, 624 SchedTernaryMC<"WriteVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV", 625 "ReadVIWMulAddV">; 626} 627 628multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 629 def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>, 630 SchedUnaryMC<"WriteVExtV", "ReadVExtV">; 631} 632 633multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> { 634 def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">, 635 SchedBinaryMC<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV">; 636 def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">, 637 SchedBinaryMC<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX">; 638 def IM : VALUmVI<funct6, opcodestr # ".vim">, 639 SchedUnaryMC<"WriteVIMergeI", "ReadVIMergeV">; 640} 641 642multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> { 643 def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">, 644 SchedBinaryMC<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV">; 645 def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">, 646 SchedBinaryMC<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX">; 647} 648 649multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> 650 : VALUm_IV_V_X<opcodestr, funct6> { 651 def IM : VALUmVI<funct6, opcodestr # ".vim">, 652 SchedUnaryMC<"WriteVICALUI", "ReadVICALUV">; 653} 654 655multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> { 656 def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">, 657 SchedBinaryMC<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", 658 forceMasked=0>; 659 def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">, 660 SchedBinaryMC<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", 661 forceMasked=0>; 662} 663 664multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6> 665 : VALUNoVm_IV_V_X<opcodestr, funct6> { 666 def I : VALUVINoVm<funct6, opcodestr # ".vi", simm5>, 667 SchedUnaryMC<"WriteVICALUI", "ReadVICALUV", forceMasked=0>; 668} 669 670multiclass VALU_FV_F<string opcodestr, bits<6> funct6> { 671 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 672 SchedBinaryMC<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF">; 673} 674 675multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6> 676 : VALU_FV_F<opcodestr, funct6> { 677 def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">, 678 SchedBinaryMC<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV">; 679} 680 681multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw> { 682 def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">, 683 SchedBinaryMC<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV">; 684 def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">, 685 SchedBinaryMC<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF">; 686} 687 688multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6> { 689 def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">, 690 SchedBinaryMC<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV">; 691 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 692 SchedBinaryMC<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF">; 693} 694 695multiclass VDIV_FV_F<string opcodestr, bits<6> funct6> { 696 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 697 SchedBinaryMC<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF">; 698} 699 700multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6> 701 : VDIV_FV_F<opcodestr, funct6> { 702 def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">, 703 SchedBinaryMC<"WriteVFDivV", "ReadVFDivV", "ReadVFDivV">; 704} 705 706multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6> { 707 def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">, 708 SchedBinaryMC<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV">; 709 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 710 SchedBinaryMC<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF">; 711} 712 713multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6> { 714 def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv">, 715 SchedTernaryMC<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV", 716 "ReadVFMulAddV">; 717 def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf">, 718 SchedTernaryMC<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF", 719 "ReadVFMulAddV">; 720} 721 722multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6> { 723 let RVVConstraint = WidenV in { 724 def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv", EarlyClobber=1>, 725 SchedTernaryMC<"WriteVFWMulAddV", "ReadVFWMulAddV", "ReadVFWMulAddV", 726 "ReadVFWMulAddV">; 727 def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf", EarlyClobber=1>, 728 SchedTernaryMC<"WriteVFWMulAddF", "ReadVFWMulAddV", "ReadVFWMulAddF", 729 "ReadVFWMulAddV">; 730 } 731} 732 733multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 734 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 735 SchedUnaryMC<"WriteVFSqrtV", "ReadVFSqrtV">; 736} 737 738multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 739 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 740 SchedUnaryMC<"WriteVFRecpV", "ReadVFRecpV">; 741} 742 743multiclass VMINMAX_FV_V_F<string opcodestr, bits<6> funct6> { 744 def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">, 745 SchedBinaryMC<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV">; 746 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 747 SchedBinaryMC<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF">; 748} 749 750multiclass VCMP_FV_F<string opcodestr, bits<6> funct6> { 751 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 752 SchedBinaryMC<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF">; 753} 754 755multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6> 756 : VCMP_FV_F<opcodestr, funct6> { 757 def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">, 758 SchedBinaryMC<"WriteVFCmpV", "ReadVFCmpV", "ReadVFCmpV">; 759} 760 761multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6> { 762 def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">, 763 SchedBinaryMC<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV">; 764 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 765 SchedBinaryMC<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF">; 766} 767 768multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 769 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 770 SchedUnaryMC<"WriteVFClassV", "ReadVFClassV">; 771} 772 773multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 774 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 775 SchedUnaryMC<"WriteVFCvtIToFV", "ReadVFCvtIToFV">; 776} 777 778multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 779 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 780 SchedUnaryMC<"WriteVFCvtFToIV", "ReadVFCvtFToIV">; 781} 782 783multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 784 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 785 SchedUnaryMC<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV">; 786} 787 788multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 789 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 790 SchedUnaryMC<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV">; 791} 792 793multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 794 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 795 SchedUnaryMC<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV">; 796} 797 798multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 799 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 800 SchedUnaryMC<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV">; 801} 802 803multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 804 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 805 SchedUnaryMC<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV">; 806} 807 808multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> { 809 def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>, 810 SchedUnaryMC<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV">; 811} 812 813multiclass VRED_MV_V<string opcodestr, bits<6> funct6> { 814 def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">, 815 SchedReductionMC<"WriteVIRedV_From", "ReadVIRedV", "ReadVIRedV0">; 816} 817 818multiclass VREDMINMAX_MV_V<string opcodestr, bits<6> funct6> { 819 def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">, 820 SchedReductionMC<"WriteVIRedMinMaxV_From", "ReadVIRedV", "ReadVIRedV0">; 821} 822 823multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> { 824 def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">, 825 SchedReductionMC<"WriteVIWRedV_From", "ReadVIWRedV", "ReadVIWRedV0">; 826} 827 828multiclass VRED_FV_V<string opcodestr, bits<6> funct6> { 829 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 830 SchedReductionMC<"WriteVFRedV_From", "ReadVFRedV", "ReadVFRedV0">; 831} 832 833multiclass VREDMINMAX_FV_V<string opcodestr, bits<6> funct6> { 834 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 835 SchedReductionMC<"WriteVFRedMinMaxV_From", "ReadVFRedV", "ReadVFRedV0">; 836} 837 838multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> { 839 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 840 SchedReductionMC<"WriteVFRedOV_From", "ReadVFRedOV", "ReadVFRedOV0">; 841} 842 843multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> { 844 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 845 SchedReductionMC<"WriteVFWRedV_From", "ReadVFWRedV", "ReadVFWRedV0">; 846} 847 848multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> { 849 def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">, 850 SchedReductionMC<"WriteVFWRedOV_From", "ReadVFWRedOV", "ReadVFWRedOV0">; 851} 852 853multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> { 854 def M : VALUVVNoVm<funct6, OPMVV, opcodestr #"." #vm #"m">, 855 SchedBinaryMC<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", 856 forceMasked=0>; 857} 858 859multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> { 860 def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>, 861 SchedUnaryMC<"WriteVMSFSV", "ReadVMSFSV">; 862} 863 864multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> { 865 def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>, 866 SchedUnaryMC<"WriteVMIotV", "ReadVMIotV">; 867} 868 869multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6> { 870 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 871 SchedBinaryMC<"WriteVShiftV", "ReadVShiftV", "ReadVShiftV">; 872 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 873 SchedBinaryMC<"WriteVShiftX", "ReadVShiftV", "ReadVShiftX">; 874 def I : VALUVI<funct6, opcodestr # ".vi", uimm5>, 875 SchedUnaryMC<"WriteVShiftI", "ReadVShiftV">; 876} 877 878multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6> { 879 def V : VALUVV<funct6, OPIVV, opcodestr # ".wv">, 880 SchedBinaryMC<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV">; 881 def X : VALUVX<funct6, OPIVX, opcodestr # ".wx">, 882 SchedBinaryMC<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX">; 883 def I : VALUVI<funct6, opcodestr # ".wi", uimm5>, 884 SchedUnaryMC<"WriteVNShiftI", "ReadVNShiftV">; 885} 886 887multiclass VMINMAX_IV_V_X<string opcodestr, bits<6> funct6> { 888 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 889 SchedBinaryMC<"WriteVIMinMaxV", "ReadVIMinMaxV", "ReadVIMinMaxV">; 890 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 891 SchedBinaryMC<"WriteVIMinMaxX", "ReadVIMinMaxV", "ReadVIMinMaxX">; 892} 893 894multiclass VCMP_IV_V<string opcodestr, bits<6> funct6> { 895 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 896 SchedBinaryMC<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV">; 897} 898 899multiclass VCMP_IV_X<string opcodestr, bits<6> funct6> { 900 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 901 SchedBinaryMC<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX">; 902} 903 904multiclass VCMP_IV_I<string opcodestr, bits<6> funct6> { 905 def I : VALUVI<funct6, opcodestr # ".vi", simm5>, 906 SchedUnaryMC<"WriteVICmpI", "ReadVICmpV">; 907} 908 909multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6> 910 : VCMP_IV_V<opcodestr, funct6>, 911 VCMP_IV_X<opcodestr, funct6>, 912 VCMP_IV_I<opcodestr, funct6>; 913 914multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6> 915 : VCMP_IV_X<opcodestr, funct6>, 916 VCMP_IV_I<opcodestr, funct6>; 917 918multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6> 919 : VCMP_IV_V<opcodestr, funct6>, 920 VCMP_IV_X<opcodestr, funct6>; 921 922multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6> { 923 def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">, 924 SchedBinaryMC<"WriteVIMulV", "ReadVIMulV", "ReadVIMulV">; 925 def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">, 926 SchedBinaryMC<"WriteVIMulX", "ReadVIMulV", "ReadVIMulX">; 927} 928 929multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6> { 930 def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">, 931 SchedBinaryMC<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV">; 932 def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">, 933 SchedBinaryMC<"WriteVIWMulX", "ReadVIWMulV", "ReadVIWMulX">; 934} 935 936multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6> { 937 def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">, 938 SchedBinaryMC<"WriteVIDivV", "ReadVIDivV", "ReadVIDivV">; 939 def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">, 940 SchedBinaryMC<"WriteVIDivX", "ReadVIDivV", "ReadVIDivX">; 941} 942 943multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6> { 944 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 945 SchedBinaryMC<"WriteVSALUV", "ReadVSALUV", "ReadVSALUV">; 946 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 947 SchedBinaryMC<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX">; 948} 949 950multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6> 951 : VSALU_IV_V_X<opcodestr, funct6> { 952 def I : VALUVI<funct6, opcodestr # ".vi", simm5>, 953 SchedUnaryMC<"WriteVSALUI", "ReadVSALUV">; 954} 955 956multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6> { 957 def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">, 958 SchedBinaryMC<"WriteVAALUV", "ReadVAALUV", "ReadVAALUV">; 959 def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">, 960 SchedBinaryMC<"WriteVAALUX", "ReadVAALUV", "ReadVAALUX">; 961} 962 963multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6> { 964 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 965 SchedBinaryMC<"WriteVSMulV", "ReadVSMulV", "ReadVSMulV">; 966 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 967 SchedBinaryMC<"WriteVSMulX", "ReadVSMulV", "ReadVSMulX">; 968} 969 970multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6> { 971 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 972 SchedBinaryMC<"WriteVSShiftV", "ReadVSShiftV", "ReadVSShiftV">; 973 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 974 SchedBinaryMC<"WriteVSShiftX", "ReadVSShiftV", "ReadVSShiftX">; 975 def I : VALUVI<funct6, opcodestr # ".vi", uimm5>, 976 SchedUnaryMC<"WriteVSShiftI", "ReadVSShiftV">; 977} 978 979multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6> { 980 def V : VALUVV<funct6, OPIVV, opcodestr # ".wv">, 981 SchedBinaryMC<"WriteVNClipV", "ReadVNClipV", "ReadVNClipV">; 982 def X : VALUVX<funct6, OPIVX, opcodestr # ".wx">, 983 SchedBinaryMC<"WriteVNClipX", "ReadVNClipV", "ReadVNClipX">; 984 def I : VALUVI<funct6, opcodestr # ".wi", uimm5>, 985 SchedUnaryMC<"WriteVNClipI", "ReadVNClipV">; 986} 987 988multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6> { 989 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 990 SchedBinaryMC<"WriteVISlideX", "ReadVISlideV", "ReadVISlideX">; 991 def I : VALUVI<funct6, opcodestr # ".vi", uimm5>, 992 SchedUnaryMC<"WriteVISlideI", "ReadVISlideV">; 993} 994 995multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6> { 996 def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">, 997 SchedBinaryMC<"WriteVISlide1X", "ReadVISlideV", "ReadVISlideX">; 998} 999 1000multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6> { 1001 def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">, 1002 SchedBinaryMC<"WriteVFSlide1F", "ReadVFSlideV", "ReadVFSlideF">; 1003} 1004 1005multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6> { 1006 def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">, 1007 SchedBinaryMC<"WriteVRGatherVV", "ReadVRGatherVV_data", 1008 "ReadVRGatherVV_index">; 1009 def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">, 1010 SchedBinaryMC<"WriteVRGatherVX", "ReadVRGatherVX_data", 1011 "ReadVRGatherVX_index">; 1012 def I : VALUVI<funct6, opcodestr # ".vi", uimm5>, 1013 SchedUnaryMC<"WriteVRGatherVI", "ReadVRGatherVI_data">; 1014} 1015 1016multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> { 1017 def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">, 1018 SchedBinaryMC<"WriteVCompressV", "ReadVCompressV", "ReadVCompressV">; 1019} 1020 1021multiclass VWholeLoadN<int l, bits<3> nf, string opcodestr, RegisterClass VRC> { 1022 defvar w = !cast<RISCVWidth>("LSWidth" # l); 1023 defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R"); 1024 1025 def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>, 1026 Sched<[s, ReadVLDX]>; 1027} 1028 1029//===----------------------------------------------------------------------===// 1030// Instructions 1031//===----------------------------------------------------------------------===// 1032 1033let Predicates = [HasVInstructions] in { 1034let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in { 1035def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei), 1036 "vsetvli", "$rd, $rs1, $vtypei">, 1037 Sched<[WriteVSETVLI, ReadVSETVLI]>; 1038def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei), 1039 "vsetivli", "$rd, $uimm, $vtypei">, 1040 Sched<[WriteVSETIVLI]>; 1041 1042def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), 1043 "vsetvl", "$rd, $rs1, $rs2">, 1044 Sched<[WriteVSETVL, ReadVSETVL, ReadVSETVL]>; 1045} // hasSideEffects = 1, mayLoad = 0, mayStore = 0 1046} // Predicates = [HasVInstructions] 1047 1048foreach eew = [8, 16, 32, 64] in { 1049 defvar w = !cast<RISCVWidth>("LSWidth" # eew); 1050 1051 let Predicates = !if(!eq(eew, 64), [HasVInstructionsI64], 1052 [HasVInstructions]) in { 1053 // Vector Unit-Stride Instructions 1054 def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESchedMC; 1055 def VSE#eew#_V : VUnitStrideStore<w, "vse"#eew#".v">, VSESchedMC; 1056 1057 // Vector Unit-Stride Fault-only-First Loads 1058 def VLE#eew#FF_V : VUnitStrideLoadFF<w, "vle"#eew#"ff.v">, VLFSchedMC; 1059 1060 // Vector Strided Instructions 1061 def VLSE#eew#_V : VStridedLoad<w, "vlse"#eew#".v">, VLSSchedMC<eew>; 1062 def VSSE#eew#_V : VStridedStore<w, "vsse"#eew#".v">, VSSSchedMC<eew>; 1063 1064 defm VL1R : VWholeLoadN<eew, 0, "vl1r", VR>; 1065 defm VL2R : VWholeLoadN<eew, 1, "vl2r", VRM2>; 1066 defm VL4R : VWholeLoadN<eew, 3, "vl4r", VRM4>; 1067 defm VL8R : VWholeLoadN<eew, 7, "vl8r", VRM8>; 1068 } 1069 1070 let Predicates = !if(!eq(eew, 64), [IsRV64, HasVInstructionsI64], 1071 [HasVInstructions]) in 1072 defm "" : VIndexLoadStore<eew>; 1073} 1074 1075let Predicates = [HasVInstructions] in { 1076def VLM_V : VUnitStrideLoadMask<"vlm.v">, 1077 Sched<[WriteVLDM_WorstCase, ReadVLDX]>; 1078def VSM_V : VUnitStrideStoreMask<"vsm.v">, 1079 Sched<[WriteVSTM_WorstCase, ReadVSTM_WorstCase, ReadVSTX]>; 1080def : InstAlias<"vle1.v $vd, (${rs1})", 1081 (VLM_V VR:$vd, GPR:$rs1), 0>; 1082def : InstAlias<"vse1.v $vs3, (${rs1})", 1083 (VSM_V VR:$vs3, GPR:$rs1), 0>; 1084 1085def VS1R_V : VWholeStore<0, "vs1r.v", VR>, 1086 Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>; 1087def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>, 1088 Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>; 1089def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>, 1090 Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>; 1091def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>, 1092 Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>; 1093 1094def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>; 1095def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>; 1096def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>; 1097def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>; 1098} // Predicates = [HasVInstructions] 1099 1100let Predicates = [HasVInstructions] in { 1101// Vector Single-Width Integer Add and Subtract 1102defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>; 1103defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>; 1104defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>; 1105 1106def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 1107def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>; 1108 1109// Vector Widening Integer Add/Subtract 1110// Refer to 11.2 Widening Vector Arithmetic Instructions 1111// The destination vector register group cannot overlap a source vector 1112// register group of a different element width (including the mask register 1113// if masked), otherwise an illegal instruction exception is raised. 1114let Constraints = "@earlyclobber $vd" in { 1115let RVVConstraint = WidenV in { 1116defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000, "v">; 1117defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010, "v">; 1118defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001, "v">; 1119defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011, "v">; 1120} // RVVConstraint = WidenV 1121// Set earlyclobber for following instructions for second and mask operands. 1122// This has the downside that the earlyclobber constraint is too coarse and 1123// will impose unnecessary restrictions by not allowing the destination to 1124// overlap with the first (wide) operand. 1125let RVVConstraint = WidenW in { 1126defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">; 1127defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">; 1128defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">; 1129defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">; 1130} // RVVConstraint = WidenW 1131} // Constraints = "@earlyclobber $vd" 1132 1133def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm", 1134 (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 1135def : InstAlias<"vwcvt.x.x.v $vd, $vs", 1136 (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>; 1137def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm", 1138 (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 1139def : InstAlias<"vwcvtu.x.x.v $vd, $vs", 1140 (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>; 1141 1142// Vector Integer Extension 1143defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>; 1144defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>; 1145defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>; 1146defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>; 1147defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>; 1148defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>; 1149 1150// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions 1151defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>; 1152let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 1153defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>; 1154defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>; 1155} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 1156defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>; 1157let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 1158defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>; 1159defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>; 1160} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 1161 1162// Vector Bitwise Logical Instructions 1163defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>; 1164defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>; 1165defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>; 1166 1167def : InstAlias<"vnot.v $vd, $vs$vm", 1168 (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>; 1169def : InstAlias<"vnot.v $vd, $vs", 1170 (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>; 1171 1172// Vector Single-Width Bit Shift Instructions 1173defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101>; 1174defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000>; 1175defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001>; 1176 1177// Vector Narrowing Integer Right Shift Instructions 1178// Refer to 11.3. Narrowing Vector Arithmetic Instructions 1179// The destination vector register group cannot overlap the first source 1180// vector register group (specified by vs2). The destination vector register 1181// group cannot overlap the mask register if used, unless LMUL=1. 1182let Constraints = "@earlyclobber $vd" in { 1183defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100>; 1184defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101>; 1185} // Constraints = "@earlyclobber $vd" 1186 1187def : InstAlias<"vncvt.x.x.w $vd, $vs$vm", 1188 (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; 1189def : InstAlias<"vncvt.x.x.w $vd, $vs", 1190 (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>; 1191 1192// Vector Integer Comparison Instructions 1193let RVVConstraint = NoConstraint in { 1194defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>; 1195defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>; 1196defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>; 1197defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>; 1198defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>; 1199defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>; 1200defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>; 1201defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>; 1202} // RVVConstraint = NoConstraint 1203 1204def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm", 1205 (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1206def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm", 1207 (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1208def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm", 1209 (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1210def : InstAlias<"vmsge.vv $vd, $va, $vb$vm", 1211 (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1212 1213let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0, 1214 mayStore = 0 in { 1215// For unsigned comparisons we need to special case 0 immediate to maintain 1216// the always true/false semantics we would invert if we just decremented the 1217// immediate like we do for signed. To match the GNU assembler we will use 1218// vmseq/vmsne.vv with the same register for both operands which we can't do 1219// from an InstAlias. 1220def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd), 1221 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1222 [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">; 1223def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd), 1224 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1225 [], "vmsltu.vi", "$vd, $vs2, $imm$vm">; 1226// Handle signed with pseudos as well for more consistency in the 1227// implementation. 1228def PseudoVMSGE_VI : Pseudo<(outs VR:$vd), 1229 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1230 [], "vmsge.vi", "$vd, $vs2, $imm$vm">; 1231def PseudoVMSLT_VI : Pseudo<(outs VR:$vd), 1232 (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), 1233 [], "vmslt.vi", "$vd, $vs2, $imm$vm">; 1234} 1235 1236let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0, 1237 mayStore = 0 in { 1238def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd), 1239 (ins VR:$vs2, GPR:$rs1), 1240 [], "vmsgeu.vx", "$vd, $vs2, $rs1">; 1241def PseudoVMSGE_VX : Pseudo<(outs VR:$vd), 1242 (ins VR:$vs2, GPR:$rs1), 1243 [], "vmsge.vx", "$vd, $vs2, $rs1">; 1244def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd), 1245 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1246 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">; 1247def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd), 1248 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1249 [], "vmsge.vx", "$vd, $vs2, $rs1$vm">; 1250def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch), 1251 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1252 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">; 1253def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch), 1254 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), 1255 [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">; 1256} 1257 1258// Vector Integer Min/Max Instructions 1259defm VMINU_V : VMINMAX_IV_V_X<"vminu", 0b000100>; 1260defm VMIN_V : VMINMAX_IV_V_X<"vmin", 0b000101>; 1261defm VMAXU_V : VMINMAX_IV_V_X<"vmaxu", 0b000110>; 1262defm VMAX_V : VMINMAX_IV_V_X<"vmax", 0b000111>; 1263 1264// Vector Single-Width Integer Multiply Instructions 1265defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>; 1266defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>; 1267defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>; 1268defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>; 1269 1270// Vector Integer Divide Instructions 1271defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>; 1272defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>; 1273defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>; 1274defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>; 1275 1276// Vector Widening Integer Multiply Instructions 1277let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { 1278defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>; 1279defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>; 1280defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>; 1281} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV 1282 1283// Vector Single-Width Integer Multiply-Add Instructions 1284defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>; 1285defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>; 1286defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>; 1287defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>; 1288 1289// Vector Widening Integer Multiply-Add Instructions 1290defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>; 1291defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>; 1292defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>; 1293defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>; 1294 1295// Vector Integer Merge Instructions 1296defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>; 1297 1298// Vector Integer Move Instructions 1299let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1, 1300 RVVConstraint = NoConstraint in { 1301// op vd, vs1 1302def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd), 1303 (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">, 1304 SchedUnaryMC<"WriteVIMovV", "ReadVIMovV", forceMasked=0>; 1305// op vd, rs1 1306def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd), 1307 (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">, 1308 SchedUnaryMC<"WriteVIMovX", "ReadVIMovX", forceMasked=0>; 1309// op vd, imm 1310def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd), 1311 (ins simm5:$imm), "vmv.v.i", "$vd, $imm">, 1312 SchedNullaryMC<"WriteVIMovI", forceMasked=0>; 1313} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1314 1315// Vector Fixed-Point Arithmetic Instructions 1316defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>; 1317defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>; 1318defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>; 1319defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>; 1320 1321// Vector Single-Width Averaging Add and Subtract 1322defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>; 1323defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>; 1324defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>; 1325defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>; 1326 1327// Vector Single-Width Fractional Multiply with Rounding and Saturation 1328defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>; 1329 1330// Vector Single-Width Scaling Shift Instructions 1331defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010>; 1332defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011>; 1333 1334// Vector Narrowing Fixed-Point Clip Instructions 1335let Constraints = "@earlyclobber $vd" in { 1336defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110>; 1337defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111>; 1338} // Constraints = "@earlyclobber $vd" 1339} // Predicates = [HasVInstructions] 1340 1341let Predicates = [HasVInstructionsAnyF] in { 1342// Vector Single-Width Floating-Point Add/Subtract Instructions 1343let Uses = [FRM], mayRaiseFPException = true in { 1344defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>; 1345defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>; 1346defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>; 1347} 1348 1349// Vector Widening Floating-Point Add/Subtract Instructions 1350let Constraints = "@earlyclobber $vd", 1351 Uses = [FRM], 1352 mayRaiseFPException = true in { 1353let RVVConstraint = WidenV in { 1354defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000, "v">; 1355defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010, "v">; 1356} // RVVConstraint = WidenV 1357// Set earlyclobber for following instructions for second and mask operands. 1358// This has the downside that the earlyclobber constraint is too coarse and 1359// will impose unnecessary restrictions by not allowing the destination to 1360// overlap with the first (wide) operand. 1361let RVVConstraint = WidenW in { 1362defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">; 1363defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">; 1364} // RVVConstraint = WidenW 1365} // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true 1366 1367// Vector Single-Width Floating-Point Multiply/Divide Instructions 1368let Uses = [FRM], mayRaiseFPException = true in { 1369defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>; 1370defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>; 1371defm VFRDIV_V : VDIV_FV_F<"vfrdiv", 0b100001>; 1372} 1373 1374// Vector Widening Floating-Point Multiply 1375let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, 1376 Uses = [FRM], mayRaiseFPException = true in { 1377defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>; 1378} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true 1379 1380// Vector Single-Width Floating-Point Fused Multiply-Add Instructions 1381let Uses = [FRM], mayRaiseFPException = true in { 1382defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>; 1383defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>; 1384defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>; 1385defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>; 1386defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>; 1387defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>; 1388defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>; 1389defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>; 1390} 1391 1392// Vector Widening Floating-Point Fused Multiply-Add Instructions 1393let Uses = [FRM], mayRaiseFPException = true in { 1394defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>; 1395defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>; 1396defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>; 1397defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>; 1398} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true 1399 1400// Vector Floating-Point Square-Root Instruction 1401let Uses = [FRM], mayRaiseFPException = true in { 1402defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>; 1403defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>; 1404} 1405 1406let mayRaiseFPException = true in 1407defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>; 1408 1409// Vector Floating-Point MIN/MAX Instructions 1410let mayRaiseFPException = true in { 1411defm VFMIN_V : VMINMAX_FV_V_F<"vfmin", 0b000100>; 1412defm VFMAX_V : VMINMAX_FV_V_F<"vfmax", 0b000110>; 1413} 1414 1415// Vector Floating-Point Sign-Injection Instructions 1416defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>; 1417defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>; 1418defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>; 1419 1420def : InstAlias<"vfneg.v $vd, $vs$vm", 1421 (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>; 1422def : InstAlias<"vfneg.v $vd, $vs", 1423 (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>; 1424def : InstAlias<"vfabs.v $vd, $vs$vm", 1425 (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>; 1426def : InstAlias<"vfabs.v $vd, $vs", 1427 (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>; 1428 1429// Vector Floating-Point Compare Instructions 1430let RVVConstraint = NoConstraint, mayRaiseFPException = true in { 1431defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>; 1432defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>; 1433defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>; 1434defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>; 1435defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>; 1436defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>; 1437} // RVVConstraint = NoConstraint, mayRaiseFPException = true 1438 1439def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm", 1440 (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1441def : InstAlias<"vmfge.vv $vd, $va, $vb$vm", 1442 (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; 1443 1444// Vector Floating-Point Classify Instruction 1445defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>; 1446 1447let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 1448 1449// Vector Floating-Point Merge Instruction 1450let vm = 0 in 1451def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd), 1452 (ins VR:$vs2, FPR32:$rs1, VMV0:$v0), 1453 "vfmerge.vfm", "$vd, $vs2, $rs1, v0">, 1454 SchedBinaryMC<"WriteVFMergeV", "ReadVFMergeV", "ReadVFMergeF">; 1455 1456// Vector Floating-Point Move Instruction 1457let RVVConstraint = NoConstraint in 1458let vm = 1, vs2 = 0 in 1459def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd), 1460 (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">, 1461 SchedUnaryMC<"WriteVFMovV", "ReadVFMovF", forceMasked=0>; 1462 1463} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1464 1465// Single-Width Floating-Point/Integer Type-Convert Instructions 1466let mayRaiseFPException = true in { 1467let Uses = [FRM] in { 1468defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>; 1469defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>; 1470} 1471defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>; 1472defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>; 1473let Uses = [FRM] in { 1474defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>; 1475defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>; 1476} 1477} // mayRaiseFPException = true 1478 1479// Widening Floating-Point/Integer Type-Convert Instructions 1480let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt, 1481 mayRaiseFPException = true in { 1482let Uses = [FRM] in { 1483defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>; 1484defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>; 1485} 1486defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>; 1487defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>; 1488defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>; 1489defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>; 1490defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>; 1491} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt 1492 1493// Narrowing Floating-Point/Integer Type-Convert Instructions 1494let Constraints = "@earlyclobber $vd", mayRaiseFPException = true in { 1495let Uses = [FRM] in { 1496defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>; 1497defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>; 1498} 1499defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>; 1500defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>; 1501let Uses = [FRM] in { 1502defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>; 1503defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>; 1504defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>; 1505} 1506defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>; 1507} // Constraints = "@earlyclobber $vd", mayRaiseFPException = true 1508} // Predicates = HasVInstructionsAnyF] 1509 1510let Predicates = [HasVInstructions] in { 1511 1512// Vector Single-Width Integer Reduction Instructions 1513let RVVConstraint = NoConstraint in { 1514defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>; 1515defm VREDMAXU : VREDMINMAX_MV_V<"vredmaxu", 0b000110>; 1516defm VREDMAX : VREDMINMAX_MV_V<"vredmax", 0b000111>; 1517defm VREDMINU : VREDMINMAX_MV_V<"vredminu", 0b000100>; 1518defm VREDMIN : VREDMINMAX_MV_V<"vredmin", 0b000101>; 1519defm VREDAND : VRED_MV_V<"vredand", 0b000001>; 1520defm VREDOR : VRED_MV_V<"vredor", 0b000010>; 1521defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>; 1522} // RVVConstraint = NoConstraint 1523 1524// Vector Widening Integer Reduction Instructions 1525let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 1526// Set earlyclobber for following instructions for second and mask operands. 1527// This has the downside that the earlyclobber constraint is too coarse and 1528// will impose unnecessary restrictions by not allowing the destination to 1529// overlap with the first (wide) operand. 1530defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>; 1531defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>; 1532} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 1533 1534} // Predicates = [HasVInstructions] 1535 1536let Predicates = [HasVInstructionsAnyF] in { 1537// Vector Single-Width Floating-Point Reduction Instructions 1538let RVVConstraint = NoConstraint in { 1539let Uses = [FRM], mayRaiseFPException = true in { 1540defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>; 1541defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>; 1542} 1543let mayRaiseFPException = true in { 1544defm VFREDMAX : VREDMINMAX_FV_V<"vfredmax", 0b000111>; 1545defm VFREDMIN : VREDMINMAX_FV_V<"vfredmin", 0b000101>; 1546} 1547} // RVVConstraint = NoConstraint 1548 1549def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm", 1550 (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>; 1551 1552// Vector Widening Floating-Point Reduction Instructions 1553let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { 1554// Set earlyclobber for following instructions for second and mask operands. 1555// This has the downside that the earlyclobber constraint is too coarse and 1556// will impose unnecessary restrictions by not allowing the destination to 1557// overlap with the first (wide) operand. 1558let Uses = [FRM], mayRaiseFPException = true in { 1559defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>; 1560defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>; 1561} 1562} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint 1563 1564def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm", 1565 (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>; 1566} // Predicates = [HasVInstructionsAnyF] 1567 1568let Predicates = [HasVInstructions] in { 1569// Vector Mask-Register Logical Instructions 1570let RVVConstraint = NoConstraint in { 1571defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">; 1572defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">; 1573defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">; 1574defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">; 1575defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">; 1576defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">; 1577defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">; 1578defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">; 1579} 1580 1581def : InstAlias<"vmmv.m $vd, $vs", 1582 (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>; 1583def : InstAlias<"vmclr.m $vd", 1584 (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>; 1585def : InstAlias<"vmset.m $vd", 1586 (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>; 1587def : InstAlias<"vmnot.m $vd, $vs", 1588 (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>; 1589 1590def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1", 1591 (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>; 1592def : InstAlias<"vmornot.mm $vd, $vs2, $vs1", 1593 (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>; 1594 1595let hasSideEffects = 0, mayLoad = 0, mayStore = 0, 1596 RVVConstraint = NoConstraint in { 1597 1598// Vector mask population count vcpop 1599def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd), 1600 (ins VR:$vs2, VMaskOp:$vm), 1601 "vcpop.m", "$vd, $vs2$vm">, 1602 SchedUnaryMC<"WriteVMPopV", "ReadVMPopV">; 1603 1604// vfirst find-first-set mask bit 1605def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd), 1606 (ins VR:$vs2, VMaskOp:$vm), 1607 "vfirst.m", "$vd, $vs2$vm">, 1608 SchedUnaryMC<"WriteVMFFSV", "ReadVMFFSV">; 1609 1610} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1611 1612def : InstAlias<"vpopc.m $vd, $vs2$vm", 1613 (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>; 1614 1615let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in { 1616 1617// vmsbf.m set-before-first mask bit 1618defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>; 1619// vmsif.m set-including-first mask bit 1620defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>; 1621// vmsof.m set-only-first mask bit 1622defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>; 1623// Vector Iota Instruction 1624defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>; 1625 1626} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota 1627 1628// Vector Element Index Instruction 1629let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 1630 1631let vs2 = 0 in 1632def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd), 1633 (ins VMaskOp:$vm), "vid.v", "$vd$vm">, 1634 SchedNullaryMC<"WriteVMIdxV">; 1635 1636// Integer Scalar Move Instructions 1637let vm = 1, RVVConstraint = NoConstraint in { 1638def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd), 1639 (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">, 1640 Sched<[WriteVIMovVX, ReadVIMovVX]>; 1641let Constraints = "$vd = $vd_wb" in 1642def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb), 1643 (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">, 1644 Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>; 1645} 1646 1647} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1648 1649} // Predicates = [HasVInstructions] 1650 1651let Predicates = [HasVInstructionsAnyF] in { 1652 1653let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1, 1654 RVVConstraint = NoConstraint in { 1655// Floating-Point Scalar Move Instructions 1656def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd), 1657 (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">, 1658 Sched<[WriteVFMovVF, ReadVFMovVF]>; 1659let Constraints = "$vd = $vd_wb" in 1660def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb), 1661 (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">, 1662 Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>; 1663 1664} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 1665 1666} // Predicates = [HasVInstructionsAnyF] 1667 1668let Predicates = [HasVInstructions] in { 1669// Vector Slide Instructions 1670let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { 1671defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110>; 1672defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>; 1673} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp 1674defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111>; 1675defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>; 1676} // Predicates = [HasVInstructions] 1677 1678let Predicates = [HasVInstructionsAnyF] in { 1679let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { 1680defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>; 1681} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp 1682defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>; 1683} // Predicates = [HasVInstructionsAnyF] 1684 1685let Predicates = [HasVInstructions] in { 1686// Vector Register Gather Instruction 1687let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in { 1688defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100>; 1689def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">, 1690 SchedBinaryMC<"WriteVRGatherVV", "ReadVRGatherVV_data", 1691 "ReadVRGatherVV_index">; 1692} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather 1693 1694// Vector Compress Instruction 1695let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in { 1696defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>; 1697} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress 1698 1699let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isMoveReg = 1, 1700 RVVConstraint = NoConstraint in { 1701// A future extension may relax the vector register alignment restrictions. 1702foreach n = [1, 2, 4, 8] in { 1703 defvar vrc = !cast<VReg>(!if(!eq(n, 1), "VR", "VRM"#n)); 1704 def VMV#n#R_V : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd), 1705 (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">, 1706 VMVRSched<n> { 1707 let Uses = []; 1708 let vm = 1; 1709 } 1710} 1711} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 1712} // Predicates = [HasVInstructions] 1713 1714let Predicates = [HasVInstructions] in { 1715 foreach nf=2-8 in { 1716 foreach eew = [8, 16, 32] in { 1717 defvar w = !cast<RISCVWidth>("LSWidth"#eew); 1718 1719 def VLSEG#nf#E#eew#_V : 1720 VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">, 1721 VLSEGSchedMC<nf, eew>; 1722 def VLSEG#nf#E#eew#FF_V : 1723 VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">, 1724 VLSEGFFSchedMC<nf, eew>; 1725 def VSSEG#nf#E#eew#_V : 1726 VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">, 1727 VSSEGSchedMC<nf, eew>; 1728 // Vector Strided Instructions 1729 def VLSSEG#nf#E#eew#_V : 1730 VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">, 1731 VLSSEGSchedMC<nf, eew>; 1732 def VSSSEG#nf#E#eew#_V : 1733 VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">, 1734 VSSSEGSchedMC<nf, eew>; 1735 1736 // Vector Indexed Instructions 1737 def VLUXSEG#nf#EI#eew#_V : 1738 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w, 1739 "vluxseg"#nf#"ei"#eew#".v">, 1740 VLXSEGSchedMC<nf, eew, isOrdered=0>; 1741 def VLOXSEG#nf#EI#eew#_V : 1742 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w, 1743 "vloxseg"#nf#"ei"#eew#".v">, 1744 VLXSEGSchedMC<nf, eew, isOrdered=1>; 1745 def VSUXSEG#nf#EI#eew#_V : 1746 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w, 1747 "vsuxseg"#nf#"ei"#eew#".v">, 1748 VSXSEGSchedMC<nf, eew, isOrdered=0>; 1749 def VSOXSEG#nf#EI#eew#_V : 1750 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w, 1751 "vsoxseg"#nf#"ei"#eew#".v">, 1752 VSXSEGSchedMC<nf, eew, isOrdered=1>; 1753 } 1754 } 1755} // Predicates = [HasVInstructions] 1756 1757let Predicates = [HasVInstructionsI64] in { 1758 foreach nf=2-8 in { 1759 // Vector Unit-strided Segment Instructions 1760 def VLSEG#nf#E64_V : 1761 VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">, 1762 VLSEGSchedMC<nf, 64>; 1763 def VLSEG#nf#E64FF_V : 1764 VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">, 1765 VLSEGFFSchedMC<nf, 64>; 1766 def VSSEG#nf#E64_V : 1767 VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">, 1768 VSSEGSchedMC<nf, 64>; 1769 1770 // Vector Strided Segment Instructions 1771 def VLSSEG#nf#E64_V : 1772 VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">, 1773 VLSSEGSchedMC<nf, 64>; 1774 def VSSSEG#nf#E64_V : 1775 VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">, 1776 VSSSEGSchedMC<nf, 64>; 1777 } 1778} // Predicates = [HasVInstructionsI64] 1779let Predicates = [HasVInstructionsI64, IsRV64] in { 1780 foreach nf = 2 - 8 in { 1781 // Vector Indexed Segment Instructions 1782 def VLUXSEG #nf #EI64_V 1783 : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64, 1784 "vluxseg" #nf #"ei64.v">, 1785 VLXSEGSchedMC<nf, 64, isOrdered=0>; 1786 def VLOXSEG #nf #EI64_V 1787 : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64, 1788 "vloxseg" #nf #"ei64.v">, 1789 VLXSEGSchedMC<nf, 64, isOrdered=1>; 1790 def VSUXSEG #nf #EI64_V 1791 : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64, 1792 "vsuxseg" #nf #"ei64.v">, 1793 VSXSEGSchedMC<nf, 64, isOrdered=0>; 1794 def VSOXSEG #nf #EI64_V 1795 : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64, 1796 "vsoxseg" #nf #"ei64.v">, 1797 VSXSEGSchedMC<nf, 64, isOrdered=1>; 1798 } 1799} // Predicates = [HasVInstructionsI64, IsRV64] 1800 1801include "RISCVInstrInfoZvfbf.td" 1802include "RISCVInstrInfoVPseudos.td" 1803