//===-- RISCVInstrInfoZvk.td - RISC-V 'Zvk' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'Zvk',
// Vector Cryptography Instructions extension, version Release 1.0.0.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def tuimm5 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
  let ParserMatchClass = UImmAsmOperand<5>;
  let EncoderMethod = "getUImmOpValue";
  let DecoderMethod = "decodeUImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t UImm;
    if (MCOp.evaluateAsConstantImm(UImm))
      return isUInt<5>(UImm);
    return MCOp.isBareSymbolRef();
  }];
}

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
multiclass VCLMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # "vv">,
          Sched<[WriteVIALUV_WorstCase, ReadVIALUV_WorstCase,
                 ReadVIALUV_WorstCase, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # "vx">,
          Sched<[WriteVIALUX_WorstCase, ReadVIALUV_WorstCase,
                 ReadVIALUX_WorstCase, ReadVMask]>;
}

class RVInstIVI_VROR<bits<6> funct6, dag outs, dag ins, string opcodestr,
                     string argstr>
    : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
  bits<5> vs2;
  bits<6> imm;
  bits<5> vd;
  bit vm;

  let Inst{31-27} = funct6{5-1};
  let Inst{26} = imm{5};
  let Inst{25} = vm;
  let Inst{24-20} = vs2;
  let Inst{19-15} = imm{4-0};
  let Inst{14-12} = OPIVI.Value;
  let Inst{11-7} = vd;
  let Inst{6-0} = OPC_OP_V.Value;

  let Uses = [VTYPE, VL];
  let RVVConstraint = VMConstraint;
}

multiclass VROR_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_V_X<opcodestr, funct6> {
  def I : RVInstIVI_VROR<funct6, (outs VR:$vd),
                         (ins VR:$vs2, uimm6:$imm, VMaskOp:$vm),
                         opcodestr # ".vi", "$vd, $vs2, $imm$vm">,
          Sched<[WriteVIALUI_WorstCase, ReadVIALUV_WorstCase,
                 ReadVMask]>;
}

// op vd, vs2, vs1
class PALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : VALUVVNoVm<funct6, opv, opcodestr> {
  let Inst{6-0} = OPC_OP_P.Value;
}

// op vd, vs2, imm, vm
class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : VALUVINoVm<funct6, opcodestr, optype> {
  let Inst{6-0} = OPC_OP_P.Value;
  let Inst{14-12} = OPMVV.Value;
}

// op vd, vs2 (use vs1 as instruction encoding)
class PALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : VALUVs2NoVm<funct6, vs1, opv, opcodestr> {
  let Inst{6-0} = OPC_OP_P.Value;
}

multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1,
                       RISCVVFormat opv, string opcodestr> {
  def NAME # _VV : PALUVs2NoVm<funct6_vv, vs1, opv, opcodestr # ".vv">;
  def NAME # _VS : PALUVs2NoVm<funct6_vs, vs1, opv, opcodestr # ".vs">;
}
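// Illustrative expansion (not an additional definition): with the defm's
// below, e.g.
//   defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">;
// this multiclass produces VAESDF_VV ("vaesdf.vv") and VAESDF_VS ("vaesdf.vs"),
// both vs2-only operations whose fixed vs1 field selects the operation.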
// vaeskf1.vi and vaeskf2.vi use a different opcode and format, so we need a
// customized class for them.
class VAESKF_MV_I<bits<6> funct6, string opcodestr, Operand optype>
    : VALUVINoVm<funct6, opcodestr, optype> {
  let Inst{6-0} = OPC_OP_P.Value;
  let Inst{14-12} = OPMVV.Value;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZvbb] in {
  def VBREV_V : VALUVs2<0b010010, 0b01010, OPMVV, "vbrev.v">;
  def VCLZ_V  : VALUVs2<0b010010, 0b01100, OPMVV, "vclz.v">;
  def VCPOP_V : VALUVs2<0b010010, 0b01110, OPMVV, "vcpop.v">;
  def VCTZ_V  : VALUVs2<0b010010, 0b01101, OPMVV, "vctz.v">;
  let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in
  defm VWSLL_V : VSHT_IV_V_X_I<"vwsll", 0b110101>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm VCLMUL_V  : VCLMUL_MV_V_X<"vclmul", 0b001100>;
  defm VCLMULH_V : VCLMUL_MV_V_X<"vclmulh", 0b001101>;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm VANDN_V : VALU_IV_V_X<"vandn", 0b000001>;
  def VBREV8_V : VALUVs2<0b010010, 0b01000, OPMVV, "vbrev8.v">;
  def VREV8_V  : VALUVs2<0b010010, 0b01001, OPMVV, "vrev8.v">;
  defm VROL_V  : VALU_IV_V_X<"vrol", 0b010101>;
  defm VROR_V  : VROR_IV_V_X_I<"vror", 0b010100>;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg], RVVConstraint = NoConstraint in {
  def VGHSH_VV : PALUVVNoVm<0b101100, OPMVV, "vghsh.vv">;
  def VGMUL_VV : PALUVs2NoVm<0b101000, 0b10001, OPMVV, "vgmul.vv">;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = NoConstraint in {
  def VSHA2CH_VV : PALUVVNoVm<0b101110, OPMVV, "vsha2ch.vv">;
  def VSHA2CL_VV : PALUVVNoVm<0b101111, OPMVV, "vsha2cl.vv">;
  def VSHA2MS_VV : PALUVVNoVm<0b101101, OPMVV, "vsha2ms.vv">;
} // Predicates = [HasStdExtZvknhaOrZvknhb]

let Predicates = [HasStdExtZvkned], RVVConstraint = NoConstraint in {
  defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">;
  defm VAESDM : VAES_MV_V_S<0b101000, 0b101001, 0b00000, OPMVV, "vaesdm">;
  defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">;
  defm VAESEM : VAES_MV_V_S<0b101000, 0b101001, 0b00010, OPMVV, "vaesem">;
  def VAESKF1_VI : VAESKF_MV_I<0b100010, "vaeskf1.vi", uimm5>;
  def VAESKF2_VI : VAESKF_MV_I<0b101010, "vaeskf2.vi", uimm5>;
  def VAESZ_VS   : PALUVs2NoVm<0b101001, 0b00111, OPMVV, "vaesz.vs">;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvksed], RVVConstraint = NoConstraint in {
  def VSM4K_VI : PALUVINoVm<0b100001, "vsm4k.vi", uimm5>;
  defm VSM4R   : VAES_MV_V_S<0b101000, 0b101001, 0b10000, OPMVV, "vsm4r">;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh], RVVConstraint = NoConstraint in {
  def VSM3C_VI  : PALUVINoVm<0b101011, "vsm3c.vi", uimm5>;
  def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">;
} // Predicates = [HasStdExtZvksh]

//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//

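// Helpers used to restrict the intrinsic patterns to the element widths the
// crypto instructions support: SEW=32 for most of them, and SEW=32 or SEW=64
// for Zvknhb (see the pattern instantiations at the end of this file).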
defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32));
defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors,
                                      !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)));

class ZvkI32IntegerVectors<string vd_lmul> {
  list<VTypeInfo> vs2_types = !cond(!eq(vd_lmul, "M8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
                                    !eq(vd_lmul, "M4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
                                    !eq(vd_lmul, "M2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 16)),
                                    !eq(vd_lmul, "M1") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 8)),
                                    !eq(vd_lmul, "MF2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 4)),
                                    !eq(vd_lmul, "MF4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 2)),
                                    !eq(vd_lmul, "MF8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 1)));
}

class ZvkMxSet<string vd_lmul> {
  list<LMULInfo> vs2_lmuls = !cond(!eq(vd_lmul, "M8") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
                                   !eq(vd_lmul, "M4") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
                                   !eq(vd_lmul, "M2") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2],
                                   !eq(vd_lmul, "M1") : [V_MF8, V_MF4, V_MF2, V_M1],
                                   !eq(vd_lmul, "MF2") : [V_MF8, V_MF4, V_MF2],
                                   !eq(vd_lmul, "MF4") : [V_MF8, V_MF4],
                                   !eq(vd_lmul, "MF8") : [V_MF8]);
}

class VPseudoUnaryNoMask_Zvk<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryNoMask_Zvk<VReg RetClass,
                              VReg Op1Class,
                              DAGOperand Op2Class,
                              string Constraint> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

multiclass VPseudoBinaryNoMask_Zvk<VReg RetClass,
                                   VReg Op1Class,
                                   DAGOperand Op2Class,
                                   LMULInfo MInfo,
                                   string Constraint = ""> {
  let VLMul = MInfo.value in
  def "_" # MInfo.MX : VPseudoBinaryNoMask_Zvk<RetClass, Op1Class, Op2Class,
                                               Constraint>;
}

multiclass VPseudoUnaryV_V_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
  let VLMul = m.value in {
    def "_VV_" # m.MX : VPseudoUnaryNoMask_Zvk<m.vrclass, m.vrclass, Constraint>;
  }
}

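// For the .vs forms, a pseudo is created for every legal (vd LMUL, vs2 LMUL)
// pair from ZvkMxSet, e.g. PseudoVAESDF_VS_M1_MF2 for a vd at LMUL=1 and a
// vs2 at LMUL=1/2.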
multiclass VPseudoUnaryV_S_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
  let VLMul = m.value in
  foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in
    def "_VS_" # m.MX # "_" # vs2_lmul.MX : VPseudoUnaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass, Constraint>;
}

multiclass VPseudoVALU_V_NoMask_Zvk<string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);

    defm "" : VPseudoUnaryV_V_NoMask_Zvk<m, Constraint>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_S_NoMask_Zvk<string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);

    defm "" : VPseudoUnaryV_S_NoMask_Zvk<m, Constraint>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_V_S_NoMask_Zvk<string Constraint = ""> {
  defm "" : VPseudoVALU_V_NoMask_Zvk<Constraint>;
  defm "" : VPseudoVALU_S_NoMask_Zvk<Constraint>;
}

multiclass VPseudoVALU_VV_NoMask_Zvk<string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);

    defm _VV : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m,
                                       Constraint>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_VI_NoMask_Zvk<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);

    defm _VI : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, ImmType, m,
                                       Constraint>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_VI_NoMaskTU_Zvk<Operand ImmType = uimm5, string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);

    defm _VI : VPseudoBinaryNoMask<m.vrclass, m.vrclass, ImmType, m,
                                   Constraint>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_VV_NoMaskTU_Zvk<string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);

    defm _VV : VPseudoBinaryNoMask<m.vrclass, m.vrclass, m.vrclass, m,
                                   Constraint>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVCLMUL_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);

    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
  }
}

multiclass VPseudoUnaryV_V<LMULInfo m> {
  let VLMul = m.value in {
    defvar suffix = "_V_" # m.MX;
    def suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
    def suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                           RISCVMaskedPseudo<MaskIdx=2>;
  }
}

multiclass VPseudoVALU_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);

    defm "" : VPseudoUnaryV_V<m>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

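// The defm's below expand the multiclasses above into per-LMUL pseudos
// (plus _MASK variants for the maskable ALU operations); the SDNode, VL and
// intrinsic patterns later in this file refer to these pseudos by name,
// e.g. PseudoVBREV_V_M1 and PseudoVBREV_V_M1_MASK.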
let Predicates = [HasStdExtZvbb] in {
  defm PseudoVBREV : VPseudoVALU_V;
  defm PseudoVCLZ : VPseudoVALU_V;
  defm PseudoVCTZ : VPseudoVALU_V;
  defm PseudoVCPOP : VPseudoVALU_V;
  defm PseudoVWSLL : VPseudoVWALU_VV_VX_VI<uimm5>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm PseudoVCLMUL : VPseudoVCLMUL_VV_VX;
  defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm PseudoVANDN : VPseudoVALU_VV_VX;
  defm PseudoVBREV8 : VPseudoVALU_V;
  defm PseudoVREV8 : VPseudoVALU_V;
  defm PseudoVROL : VPseudoVALU_VV_VX;
  defm PseudoVROR : VPseudoVALU_VV_VX_VI<uimm6>;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg] in {
  defm PseudoVGHSH : VPseudoVALU_VV_NoMask_Zvk;
  defm PseudoVGMUL : VPseudoVALU_V_NoMask_Zvk;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvkned] in {
  defm PseudoVAESDF : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESDM : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESEF : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESEM : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESKF1 : VPseudoVALU_VI_NoMaskTU_Zvk;
  defm PseudoVAESKF2 : VPseudoVALU_VI_NoMask_Zvk<uimm5>;
  defm PseudoVAESZ : VPseudoVALU_S_NoMask_Zvk;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvknhaOrZvknhb] in {
  defm PseudoVSHA2CH : VPseudoVALU_VV_NoMask_Zvk;
  defm PseudoVSHA2CL : VPseudoVALU_VV_NoMask_Zvk;
  defm PseudoVSHA2MS : VPseudoVALU_VV_NoMask_Zvk;
} // Predicates = [HasStdExtZvknhaOrZvknhb]

let Predicates = [HasStdExtZvksed] in {
  defm PseudoVSM4K : VPseudoVALU_VI_NoMaskTU_Zvk;
  defm PseudoVSM4R : VPseudoVALU_V_S_NoMask_Zvk;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh] in {
  defm PseudoVSM3C : VPseudoVALU_VI_NoMask_Zvk<uimm5>;
  defm PseudoVSM3ME : VPseudoVALU_VV_NoMaskTU_Zvk;
} // Predicates = [HasStdExtZvksh]

//===----------------------------------------------------------------------===//
// SDNode patterns
//===----------------------------------------------------------------------===//

multiclass VPatUnarySDNode_V<SDPatternOperator op, string instruction_name,
                             Predicate predicate = HasStdExtZvbb> {
  foreach vti = AllIntegerVectors in {
    let Predicates = !listconcat([predicate],
                                 GetVTypePredicates<vti>.Predicates) in {
      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1))),
                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Helpers for detecting splats since we preprocess splat_vector to vmv.v.x
// This should match the logic in RISCVDAGToDAGISel::selectVSplat
def riscv_splat_vector : PatFrag<(ops node:$rs1),
                                 (riscv_vmv_v_x_vl undef, node:$rs1, srcvalue)>;
def riscv_vnot : PatFrag<(ops node:$rs1), (xor node:$rs1,
                                               (riscv_splat_vector -1))>;

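// vandn.vv computes vs2 & ~vs1, so in the patterns below the inverted
// operand is placed in the rs1 (vs1) position of the pseudo.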
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (and (riscv_vnot vti.RegClass:$rs1),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (and (riscv_splat_vector
                                  (not vti.ScalarRegClass:$rs1)),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

defm : VPatUnarySDNode_V<bitreverse, "PseudoVBREV">;
defm : VPatUnarySDNode_V<bswap, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">;
defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">;
defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">;

defm : VPatBinarySDNode_VV_VX<rotl, "PseudoVROL">;

// Invert the immediate and mask it to SEW for readability.
def InvRot8Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x7 & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot16Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0xf & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot32Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x1f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot64Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x3f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
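// For example, at SEW=32 a rotate left by 3 becomes vror.vi with an
// immediate of (64 - 3) & 0x1f = 29, since rotating left by k is the same
// as rotating right by SEW - k.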
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (rotl vti.RegClass:$rs2,
                                (vti.Vector (SplatPat_uimm6 uimm6:$rs1)))),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}
defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>;

foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, GPR:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, uimm5:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

//===----------------------------------------------------------------------===//
// VL patterns
//===----------------------------------------------------------------------===//

multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
                         Predicate predicate = HasStdExtZvbb> {
  foreach vti = AllIntegerVectors in {
    let Predicates = !listconcat([predicate],
                                 GetVTypePredicates<vti>.Predicates) in {
      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
                                (vti.Vector vti.RegClass:$merge),
                                (vti.Mask V0),
                                VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge,
                   vti.RegClass:$rs1,
                   (vti.Mask V0),
                   GPR:$vl,
                   vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
    }
  }
}

foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
                                           (vti.Vector vti.RegClass:$rs1),
                                           (riscv_splat_vector -1),
                                           (vti.Vector vti.RegClass:$merge),
                                           (vti.Mask V0),
                                           VLOpFrag),
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$merge),
                                        (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;

    def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
                                           (not vti.ScalarRegClass:$rs1)),
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$merge),
                                        (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;
  }
}

defm : VPatUnaryVL_V<riscv_bitreverse_vl, "PseudoVBREV">;
defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">;
defm : VPatUnaryVL_V<riscv_cttz_vl, "PseudoVCTZ">;
defm : VPatUnaryVL_V<riscv_ctpop_vl, "PseudoVCPOP">;

defm : VPatBinaryVL_VV_VX<riscv_rotl_vl, "PseudoVROL">;
// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
                             (vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
                             (vti.Vector vti.RegClass:$merge),
                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
defm : VPatBinaryVL_VV_VX_VI<riscv_rotr_vl, "PseudoVROR", uimm6>;

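// vwsll is selected either from a plain VL shift whose left operand is a
// zero-extended narrower vector, or from the dedicated riscv_vwsll_vl node.
// The shift amount may be a vector, a scalar splat (only the low bits of the
// scalar matter, hence Low8BitsSplatPat), or a 5-bit immediate.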
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(riscv_shl_vl
                 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl
                 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl
                 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                 (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector vti.RegClass:$rs1),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Codegen patterns
//===----------------------------------------------------------------------===//

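// The classes and multiclasses below map the int_riscv_* vector-crypto
// intrinsics onto the pseudos defined above. For the unmasked crypto pseudos
// (_Zvk), vd is both a source and the destination, so the patterns forward
// the intrinsic's $merge operand along with vl, sew and the tail policy.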
class VPatUnaryNoMask_Zvk<string intrinsic_name,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op2_type,
                          int sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                      (result_type result_reg_class:$merge),
                      (op2_type op2_reg_class:$rs2),
                      VLOpFrag, (XLenVT timm:$policy))),
      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
         (result_type result_reg_class:$merge),
         (op2_type op2_reg_class:$rs2),
         GPR:$vl, sew, (XLenVT timm:$policy))>;

class VPatUnaryNoMask_VS_Zvk<string intrinsic_name,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             LMULInfo vs2_lmul,
                             VReg result_reg_class,
                             VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                      (result_type result_reg_class:$merge),
                      (op2_type op2_reg_class:$rs2),
                      VLOpFrag, (XLenVT timm:$policy))),
      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX)
         (result_type result_reg_class:$merge),
         (op2_type op2_reg_class:$rs2),
         GPR:$vl, sew, (XLenVT timm:$policy))>;

multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatUnaryNoMask_Zvk<intrinsic # "_vv", instruction, "VV",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.RegClass, vti.RegClass>;
}

multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
                                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in
      def : VPatUnaryNoMask_VS_Zvk<intrinsic # "_vs", instruction, "VS",
                                   vti.Vector, vti_vs2.Vector, vti.Log2SEW,
                                   vti.LMul, vti_vs2.LMul, vti.RegClass, vti_vs2.RegClass>;
}

multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist> {
  defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>;
  defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>;
}

multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV",
                                      vti.Vector, vti.Vector, vti.Vector,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist, Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI",
                                      vti.Vector, vti.Vector, XLenVT,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist, Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, XLenVT, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VX_VROTATE<string intrinsic, string instruction,
                                  list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    defm : VPatBinary<intrinsic,
                      !if(isSEWAware,
                          instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                          instruction#"_"#kind#"_"#vti.LMul.MX),
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}

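// For the vrol intrinsic with an immediate amount, the rotate is again
// rewritten as vror with the inverted immediate. InvRot64Imm is enough for
// every element width here because SEW divides 64, so (64 - imm) is congruent
// to -imm modulo SEW, and only the low log2(SEW) bits of the rotate amount
// are used.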
multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(
                      !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
                          instruction#"_VI_"#vti.LMul.MX));
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$merge),
                                (vti.Vector vti.RegClass:$rs2),
                                (XLenVT uimm6:$rs1),
                                VLOpFrag)),
              (Pseudo (vti.Vector vti.RegClass:$merge),
                      (vti.Vector vti.RegClass:$rs2),
                      (InvRot64Imm uimm6:$rs1),
                      GPR:$vl, vti.Log2SEW, TU_MU)>;

    defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask");
    defvar PseudoMask = !cast<Instruction>(
                          !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
                              instruction#"_VI_"#vti.LMul.MX#"_MASK"));
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$merge),
                                    (vti.Vector vti.RegClass:$rs2),
                                    (XLenVT uimm6:$rs1),
                                    (vti.Mask V0),
                                    VLOpFrag, (XLenVT timm:$policy))),
              (PseudoMask (vti.Vector vti.RegClass:$merge),
                          (vti.Vector vti.RegClass:$rs2),
                          (InvRot64Imm uimm6:$rs1),
                          (vti.Mask V0),
                          GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  }
}

multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
                                  string instruction2, list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;

multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist, Operand ImmType = uimm6>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryW_VI_VWSLL<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, uimm5>;
  }
}

multiclass VPatBinaryW_VX_VWSLL<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, Vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
                                      list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX_VWSLL<intrinsic, instruction, vtilist>,
      VPatBinaryW_VI_VWSLL<intrinsic, instruction, vtilist>;

let Predicates = [HasStdExtZvbb] in {
  defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>;
  defm : VPatBinaryW_VV_VX_VI_VWSLL<"int_riscv_vwsll", "PseudoVWSLL", AllWidenableIntVectors>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL", I64IntegerVectors>;
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH", I64IntegerVectors>;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>;
  defm : VPatBinaryV_VV_VX_VROL<"int_riscv_vrol", "PseudoVROL", "PseudoVROR", AllIntegerVectors>;
  defm : VPatBinaryV_VV_VX_VI_VROR<"int_riscv_vror", "PseudoVROR", AllIntegerVectors>;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH", I32IntegerVectors>;
  defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL", I32IntegerVectors>;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvkned] in {
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF", I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM", I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF", I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM", I32IntegerVectors>;
  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vaeskf1", "PseudoVAESKF1", I32IntegerVectors>;
  defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2", I32IntegerVectors>;
  defm : VPatUnaryV_S_NoMaskVectorCrypto<"int_riscv_vaesz", "PseudoVAESZ", I32IntegerVectors>;
} // Predicates = [HasStdExtZvkned]

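// Zvknha only supports SEW=32 element groups, while Zvknhb supports SEW=32
// and SEW=64, hence the two sets of pattern instantiations below.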
let Predicates = [HasStdExtZvknha] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL", I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32IntegerVectors>;
} // Predicates = [HasStdExtZvknha]

let Predicates = [HasStdExtZvknhb] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32I64IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL", I32I64IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32I64IntegerVectors>;
} // Predicates = [HasStdExtZvknhb]

let Predicates = [HasStdExtZvksed] in {
  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vsm4k", "PseudoVSM4K", I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R", I32IntegerVectors>;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh] in {
  defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C", I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMaskTU<"int_riscv_vsm3me", "PseudoVSM3ME", I32IntegerVectors>;
} // Predicates = [HasStdExtZvksh]