1//===-- RISCVInstrInfoZvk.td - RISC-V 'Zvk' instructions ---*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This file describes the RISC-V instructions from the standard 'Zvk', 10// Vector Cryptography Instructions extension, version Release 1.0.0. 11// 12//===----------------------------------------------------------------------===// 13 14//===----------------------------------------------------------------------===// 15// Operand and SDNode transformation definitions. 16//===----------------------------------------------------------------------===// 17 18def tuimm5 : RISCVOp, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]>; 19 20//===----------------------------------------------------------------------===// 21// Instruction class templates 22//===----------------------------------------------------------------------===// 23 24let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 25multiclass VCLMUL_MV_V_X<string opcodestr, bits<6> funct6> { 26 def V : VALUVV<funct6, OPMVV, opcodestr # "." # "vv">, 27 SchedBinaryMC<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV">; 28 def X : VALUVX<funct6, OPMVX, opcodestr # "." # "vx">, 29 SchedBinaryMC<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX">; 30} 31 32class RVInstIVI_VROR<bits<6> funct6, dag outs, dag ins, string opcodestr, 33 string argstr> 34 : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> { 35 bits<5> vs2; 36 bits<6> imm; 37 bits<5> vd; 38 bit vm; 39 40 let Inst{31-27} = funct6{5-1}; 41 let Inst{26} = imm{5}; 42 let Inst{25} = vm; 43 let Inst{24-20} = vs2; 44 let Inst{19-15} = imm{4-0}; 45 let Inst{14-12} = OPIVI.Value; 46 let Inst{11-7} = vd; 47 let Inst{6-0} = OPC_OP_V.Value; 48 49 let Uses = [VTYPE, VL]; 50 let RVVConstraint = VMConstraint; 51} 52 53multiclass VROR_IV_V_X_I<string opcodestr, bits<6> funct6> 54 : VALU_IV_V_X<opcodestr, funct6> { 55 def I : RVInstIVI_VROR<funct6, (outs VR:$vd), 56 (ins VR:$vs2, uimm6:$imm, VMaskOp:$vm), 57 opcodestr # ".vi", "$vd, $vs2, $imm$vm">, 58 SchedUnaryMC<"WriteVRotI", "ReadVRotV">; 59} 60 61// op vd, vs2, vs1 62class PALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr> 63 : VALUVVNoVm<funct6, opv, opcodestr> { 64 let Inst{6-0} = OPC_OP_VE.Value; 65} 66 67// op vd, vs2, vs1 68class PALUVVNoVmTernary<bits<6> funct6, RISCVVFormat opv, string opcodestr> 69 : RVInstVV<funct6, opv, (outs VR:$vd_wb), 70 (ins VR:$vd, VR:$vs2, VR:$vs1), 71 opcodestr, "$vd, $vs2, $vs1"> { 72 let Constraints = "$vd = $vd_wb"; 73 let vm = 1; 74 let Inst{6-0} = OPC_OP_VE.Value; 75} 76 77// op vd, vs2, imm 78class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype> 79 : VALUVINoVm<funct6, opcodestr, optype> { 80 let Inst{6-0} = OPC_OP_VE.Value; 81 let Inst{14-12} = OPMVV.Value; 82} 83 84// op vd, vs2, imm where vd is also a source regardless of tail policy 85class PALUVINoVmBinary<bits<6> funct6, string opcodestr, Operand optype> 86 : RVInstIVI<funct6, (outs VR:$vd_wb), 87 (ins VR:$vd, VR:$vs2, optype:$imm), 88 opcodestr, "$vd, $vs2, $imm"> { 89 let Constraints = "$vd = $vd_wb"; 90 let vm = 1; 91 let Inst{6-0} = OPC_OP_VE.Value; 92 let Inst{14-12} = OPMVV.Value; 93} 94 95// op vd, vs2 (use vs1 as instruction encoding) where vd is also a source 96// regardless of tail policy 97class PALUVs2NoVmBinary<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, 98 string opcodestr> 99 : RVInstV<funct6, vs1, opv, (outs VR:$vd_wb), (ins VR:$vd, VR:$vs2), 100 opcodestr, "$vd, $vs2"> { 101 let Constraints = "$vd = $vd_wb"; 102 let vm = 1; 103 let Inst{6-0} = OPC_OP_VE.Value; 104} 105 106multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1, 107 RISCVVFormat opv, string opcodestr> { 108 let RVVConstraint = NoConstraint in 109 def NAME # _VV : PALUVs2NoVmBinary<funct6_vv, vs1, opv, opcodestr # ".vv">, 110 SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">; 111 let RVVConstraint = VS2Constraint in 112 def NAME # _VS : PALUVs2NoVmBinary<funct6_vs, vs1, opv, opcodestr # ".vs">, 113 SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">; 114} 115} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 116 117//===----------------------------------------------------------------------===// 118// Instructions 119//===----------------------------------------------------------------------===// 120 121let Predicates = [HasStdExtZvbb] in { 122 def VBREV_V : VALUVs2<0b010010, 0b01010, OPMVV, "vbrev.v">; 123 def VCLZ_V : VALUVs2<0b010010, 0b01100, OPMVV, "vclz.v">; 124 def VCPOP_V : VALUVs2<0b010010, 0b01110, OPMVV, "vcpop.v">; 125 def VCTZ_V : VALUVs2<0b010010, 0b01101, OPMVV, "vctz.v">; 126 let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in 127 defm VWSLL_V : VSHT_IV_V_X_I<"vwsll", 0b110101>; 128} // Predicates = [HasStdExtZvbb] 129 130let Predicates = [HasStdExtZvbc] in { 131 defm VCLMUL_V : VCLMUL_MV_V_X<"vclmul", 0b001100>; 132 defm VCLMULH_V : VCLMUL_MV_V_X<"vclmulh", 0b001101>; 133} // Predicates = [HasStdExtZvbc] 134 135let Predicates = [HasStdExtZvkb] in { 136 defm VANDN_V : VALU_IV_V_X<"vandn", 0b000001>; 137 def VBREV8_V : VALUVs2<0b010010, 0b01000, OPMVV, "vbrev8.v">; 138 def VREV8_V : VALUVs2<0b010010, 0b01001, OPMVV, "vrev8.v">; 139 defm VROL_V : VALU_IV_V_X<"vrol", 0b010101>; 140 defm VROR_V : VROR_IV_V_X_I<"vror", 0b010100>; 141} // Predicates = [HasStdExtZvkb] 142 143let Predicates = [HasStdExtZvkg], RVVConstraint = NoConstraint in { 144 def VGHSH_VV : PALUVVNoVmTernary<0b101100, OPMVV, "vghsh.vv">, 145 SchedTernaryMC<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV", 146 "ReadVGHSHV">; 147 def VGMUL_VV : PALUVs2NoVmBinary<0b101000, 0b10001, OPMVV, "vgmul.vv">, 148 SchedBinaryMC<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV">; 149} // Predicates = [HasStdExtZvkg] 150 151let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = Sha2Constraint in { 152 def VSHA2CH_VV : PALUVVNoVmTernary<0b101110, OPMVV, "vsha2ch.vv">, 153 SchedTernaryMC<"WriteVSHA2CHV", "ReadVSHA2CHV", "ReadVSHA2CHV", 154 "ReadVSHA2CHV">; 155 def VSHA2CL_VV : PALUVVNoVmTernary<0b101111, OPMVV, "vsha2cl.vv">, 156 SchedTernaryMC<"WriteVSHA2CLV", "ReadVSHA2CLV", "ReadVSHA2CLV", 157 "ReadVSHA2CLV">; 158 def VSHA2MS_VV : PALUVVNoVmTernary<0b101101, OPMVV, "vsha2ms.vv">, 159 SchedTernaryMC<"WriteVSHA2MSV", "ReadVSHA2MSV", "ReadVSHA2MSV", 160 "ReadVSHA2MSV">; 161} // Predicates = [HasStdExtZvknhaOrZvknhb] 162 163let Predicates = [HasStdExtZvkned] in { 164 defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">; 165 defm VAESDM : VAES_MV_V_S<0b101000, 0b101001, 0b00000, OPMVV, "vaesdm">; 166 defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">; 167 defm VAESEM : VAES_MV_V_S<0b101000, 0b101001, 0b00010, OPMVV, "vaesem">; 168 def VAESKF1_VI : PALUVINoVm<0b100010, "vaeskf1.vi", uimm5>, 169 SchedUnaryMC<"WriteVAESKF1V", "ReadVAESKF1V">; 170 def VAESKF2_VI : PALUVINoVmBinary<0b101010, "vaeskf2.vi", uimm5>, 171 SchedBinaryMC<"WriteVAESKF2V", "ReadVAESKF2V", "ReadVAESKF2V">; 172 let RVVConstraint = VS2Constraint in 173 def VAESZ_VS : PALUVs2NoVmBinary<0b101001, 0b00111, OPMVV, "vaesz.vs">, 174 SchedBinaryMC<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV">; 175} // Predicates = [HasStdExtZvkned] 176 177let Predicates = [HasStdExtZvksed] in { 178 let RVVConstraint = NoConstraint in 179 def VSM4K_VI : PALUVINoVm<0b100001, "vsm4k.vi", uimm5>, 180 SchedUnaryMC<"WriteVSM4KV", "ReadVSM4KV">; 181 defm VSM4R : VAES_MV_V_S<0b101000, 0b101001, 0b10000, OPMVV, "vsm4r">; 182} // Predicates = [HasStdExtZvksed] 183 184let Predicates = [HasStdExtZvksh], RVVConstraint = VS2Constraint in { 185 def VSM3C_VI : PALUVINoVmBinary<0b101011, "vsm3c.vi", uimm5>, 186 SchedBinaryMC<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV">; 187 def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">, 188 SchedUnaryMC<"WriteVSM3MEV", "ReadVSM3MEV">; 189} // Predicates = [HasStdExtZvksh] 190 191//===----------------------------------------------------------------------===// 192// Pseudo instructions 193//===----------------------------------------------------------------------===// 194 195defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32)); 196defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors, 197 !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64))); 198 199class ZvkI32IntegerVectors<string vd_lmul> { 200 list<VTypeInfo> vs2_types = !cond(!eq(vd_lmul, "M8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)), 201 !eq(vd_lmul, "M4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)), 202 !eq(vd_lmul, "M2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 16)), 203 !eq(vd_lmul, "M1") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 8)), 204 !eq(vd_lmul, "MF2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 4)), 205 !eq(vd_lmul, "MF4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 2)), 206 !eq(vd_lmul, "MF8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 1))); 207} 208 209class ZvkMxSet<string vd_lmul> { 210 list<LMULInfo> vs2_lmuls = !cond(!eq(vd_lmul, "M8") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4], 211 !eq(vd_lmul, "M4") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4], 212 !eq(vd_lmul, "M2") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2], 213 !eq(vd_lmul, "M1") : [V_MF8, V_MF4, V_MF2, V_M1], 214 !eq(vd_lmul, "MF2") : [V_MF8, V_MF4, V_MF2], 215 !eq(vd_lmul, "MF4") : [V_MF8, V_MF4], 216 !eq(vd_lmul, "MF8") : [V_MF8]); 217} 218 219class VPseudoBinaryNoMask_Zvk<DAGOperand RetClass, VReg OpClass> : 220 Pseudo<(outs RetClass:$rd_wb), 221 (ins RetClass:$rd, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, 222 RISCVVPseudo { 223 let mayLoad = 0; 224 let mayStore = 0; 225 let hasSideEffects = 0; 226 let Constraints = "$rd_wb = $rd"; 227 let HasVLOp = 1; 228 let HasSEWOp = 1; 229 let HasVecPolicyOp = 1; 230 let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); 231} 232 233class VPseudoTernaryNoMask_Zvk<VReg RetClass, 234 VReg Op1Class, 235 DAGOperand Op2Class> : 236 Pseudo<(outs RetClass:$rd_wb), 237 (ins RetClass:$rd, Op1Class:$rs2, Op2Class:$rs1, 238 AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, 239 RISCVVPseudo { 240 let mayLoad = 0; 241 let mayStore = 0; 242 let hasSideEffects = 0; 243 let Constraints = "$rd_wb = $rd"; 244 let HasVLOp = 1; 245 let HasSEWOp = 1; 246 let HasVecPolicyOp = 1; 247 let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); 248} 249 250multiclass VPseudoBinaryNoMaskPolicy_Zvk<VReg RetClass, 251 VReg Op1Class, 252 DAGOperand Op2Class, 253 LMULInfo MInfo, 254 string Constraint = ""> { 255 let VLMul = MInfo.value in { 256 def "_" # MInfo.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class, 257 Constraint>; 258 } 259} 260 261multiclass VPseudoTernaryNoMask_Zvk<VReg RetClass, 262 VReg Op1Class, 263 DAGOperand Op2Class, 264 LMULInfo MInfo> { 265 let VLMul = MInfo.value in 266 def "_" # MInfo.MX : VPseudoTernaryNoMask_Zvk<RetClass, Op1Class, Op2Class>; 267} 268 269multiclass VPseudoBinaryV_V_NoMask_Zvk<LMULInfo m> { 270 let VLMul = m.value in { 271 def "_VV_" # m.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass>; 272 } 273} 274 275multiclass VPseudoBinaryV_S_NoMask_Zvk<LMULInfo m> { 276 let VLMul = m.value in 277 foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in 278 def "_VS_" # m.MX # "_" # vs2_lmul.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass>; 279} 280 281multiclass VPseudoVGMUL { 282 foreach m = MxListVF4 in { 283 defvar mx = m.MX; 284 defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>, 285 SchedBinary<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV", mx>; 286 } 287} 288 289multiclass VPseudoVAESMV { 290 foreach m = MxListVF4 in { 291 defvar mx = m.MX; 292 defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>, 293 SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>; 294 defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>, 295 SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>; 296 297 } 298} 299 300multiclass VPseudoVSM4R { 301 foreach m = MxListVF4 in { 302 defvar mx = m.MX; 303 defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>, 304 SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>; 305 defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>, 306 SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>; 307 308 } 309} 310 311multiclass VPseudoVGHSH { 312 foreach m = MxListVF4 in { 313 defvar mx = m.MX; 314 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>, 315 SchedTernary<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV", 316 "ReadVGHSHV", mx>; 317 } 318} 319 320multiclass VPseudoVSHA2CH { 321 foreach m = MxListVF4 in { 322 defvar mx = m.MX; 323 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>, 324 SchedTernary<"WriteVSHA2CHV", "ReadVSHA2CHV", "ReadVSHA2CHV", 325 "ReadVSHA2CHV", mx>; 326 } 327} 328 329multiclass VPseudoVSHA2CL { 330 foreach m = MxListVF4 in { 331 defvar mx = m.MX; 332 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>, 333 SchedTernary<"WriteVSHA2CLV", "ReadVSHA2CLV", "ReadVSHA2CLV", 334 "ReadVSHA2CLV", mx>; 335 } 336} 337 338multiclass VPseudoVSHA2MS { 339 foreach m = MxListVF4 in { 340 defvar mx = m.MX; 341 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>, 342 SchedTernary<"WriteVSHA2MSV", "ReadVSHA2MSV", "ReadVSHA2MSV", 343 "ReadVSHA2MSV", mx>; 344 } 345} 346 347multiclass VPseudoVAESKF1 { 348 foreach m = MxListVF4 in { 349 defvar mx = m.MX; 350 defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>, 351 SchedBinary<"WriteVAESKF1V", "ReadVAESKF1V", "ReadVAESKF1V", mx, 352 forceMergeOpRead=true>; 353 } 354} 355 356multiclass VPseudoVAESKF2 { 357 foreach m = MxListVF4 in { 358 defvar mx = m.MX; 359 defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>, 360 SchedTernary<"WriteVAESKF2V", "ReadVAESKF2V", "ReadVAESKF2V", 361 "ReadVAESKF2V", mx>; 362 } 363} 364 365multiclass VPseudoVAESZ { 366 foreach m = MxListVF4 in { 367 defvar mx = m.MX; 368 defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>, 369 SchedBinary<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV", mx>; 370 } 371} 372 373multiclass VPseudoVSM3C { 374 foreach m = MxListVF4 in { 375 defvar mx = m.MX; 376 defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>, 377 SchedTernary<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV", 378 "ReadVSM3CV", mx>; 379 } 380} 381 382multiclass VPseudoVSM4K { 383 foreach m = MxListVF4 in { 384 defvar mx = m.MX; 385 defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>, 386 SchedBinary<"WriteVSM4KV", "ReadVSM4KV", "ReadVSM4KV", mx, 387 forceMergeOpRead=true>; 388 } 389} 390 391multiclass VPseudoVSM3ME { 392 foreach m = MxListVF4 in { 393 defvar mx = m.MX; 394 defm _VV : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, m.vrclass, m>, 395 SchedBinary<"WriteVSM3MEV", "ReadVSM3MEV", "ReadVSM3MEV", mx, 396 forceMergeOpRead=true>; 397 } 398} 399 400multiclass VPseudoVCLMUL_VV_VX { 401 foreach m = MxList in { 402 defvar mx = m.MX; 403 defm "" : VPseudoBinaryV_VV<m>, 404 SchedBinary<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV", mx, 405 forceMergeOpRead=true>; 406 defm "" : VPseudoBinaryV_VX<m>, 407 SchedBinary<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX", mx, 408 forceMergeOpRead=true>; 409 } 410} 411 412multiclass VPseudoUnaryV_V<LMULInfo m> { 413 let VLMul = m.value in { 414 defvar suffix = "_V_" # m.MX; 415 def suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>; 416 def suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>, 417 RISCVMaskedPseudo<MaskIdx=2>; 418 } 419} 420 421multiclass VPseudoVBREV { 422 foreach m = MxList in { 423 defvar mx = m.MX; 424 defm "" : VPseudoUnaryV_V<m>, 425 SchedUnary<"WriteVBREVV", "ReadVBREVV", mx, forceMergeOpRead=true>; 426 } 427} 428 429multiclass VPseudoVCLZ { 430 foreach m = MxList in { 431 defvar mx = m.MX; 432 defm "" : VPseudoUnaryV_V<m>, 433 SchedUnary<"WriteVCLZV", "ReadVCLZV", mx, forceMergeOpRead=true>; 434 } 435} 436 437multiclass VPseudoVCTZ { 438 foreach m = MxList in { 439 defvar mx = m.MX; 440 defm "" : VPseudoUnaryV_V<m>, 441 SchedUnary<"WriteVCTZV", "ReadVCTZV", mx, forceMergeOpRead=true>; 442 } 443} 444 445multiclass VPseudoVCPOP { 446 foreach m = MxList in { 447 defvar mx = m.MX; 448 defm "" : VPseudoUnaryV_V<m>, 449 SchedUnary<"WriteVCPOPV", "ReadVCPOPV", mx, forceMergeOpRead=true>; 450 } 451} 452 453multiclass VPseudoVWSLL { 454 foreach m = MxListW in { 455 defvar mx = m.MX; 456 defm "" : VPseudoBinaryW_VV<m>, 457 SchedBinary<"WriteVWSLLV", "ReadVWSLLV", "ReadVWSLLV", mx, 458 forceMergeOpRead=true>; 459 defm "" : VPseudoBinaryW_VX<m>, 460 SchedBinary<"WriteVWSLLX", "ReadVWSLLV", "ReadVWSLLX", mx, 461 forceMergeOpRead=true>; 462 defm "" : VPseudoBinaryW_VI<uimm5, m>, 463 SchedUnary<"WriteVWSLLI", "ReadVWSLLV", mx, 464 forceMergeOpRead=true>; 465 } 466} 467 468multiclass VPseudoVANDN { 469 foreach m = MxList in { 470 defm "" : VPseudoBinaryV_VV<m>, 471 SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX, 472 forceMergeOpRead=true>; 473 defm "" : VPseudoBinaryV_VX<m>, 474 SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX, 475 forceMergeOpRead=true>; 476 } 477} 478 479multiclass VPseudoVBREV8 { 480 foreach m = MxList in { 481 defvar mx = m.MX; 482 defm "" : VPseudoUnaryV_V<m>, 483 SchedUnary<"WriteVBREV8V", "ReadVBREV8V", mx, forceMergeOpRead=true>; 484 } 485} 486 487multiclass VPseudoVREV8 { 488 foreach m = MxList in { 489 defvar mx = m.MX; 490 defm "" : VPseudoUnaryV_V<m>, 491 SchedUnary<"WriteVREV8V", "ReadVREV8V", mx, forceMergeOpRead=true>; 492 } 493} 494 495multiclass VPseudoVROT_VV_VX { 496 foreach m = MxList in { 497 defm "" : VPseudoBinaryV_VV<m>, 498 SchedBinary<"WriteVRotV", "ReadVRotV", "ReadVRotV", m.MX, 499 forceMergeOpRead=true>; 500 defm "" : VPseudoBinaryV_VX<m>, 501 SchedBinary<"WriteVRotX", "ReadVRotV", "ReadVRotX", m.MX, 502 forceMergeOpRead=true>; 503 } 504} 505 506multiclass VPseudoVROT_VV_VX_VI 507 : VPseudoVROT_VV_VX { 508 foreach m = MxList in { 509 defm "" : VPseudoBinaryV_VI<uimm6, m>, 510 SchedUnary<"WriteVRotI", "ReadVRotV", m.MX, 511 forceMergeOpRead=true>; 512 } 513} 514 515let Predicates = [HasStdExtZvbb] in { 516 defm PseudoVBREV : VPseudoVBREV; 517 defm PseudoVCLZ : VPseudoVCLZ; 518 defm PseudoVCTZ : VPseudoVCTZ; 519 defm PseudoVCPOP : VPseudoVCPOP; 520 defm PseudoVWSLL : VPseudoVWSLL; 521} // Predicates = [HasStdExtZvbb] 522 523let Predicates = [HasStdExtZvbc] in { 524 defm PseudoVCLMUL : VPseudoVCLMUL_VV_VX; 525 defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX; 526} // Predicates = [HasStdExtZvbc] 527 528let Predicates = [HasStdExtZvkb] in { 529 defm PseudoVANDN : VPseudoVANDN; 530 defm PseudoVBREV8 : VPseudoVBREV8; 531 defm PseudoVREV8 : VPseudoVREV8; 532 defm PseudoVROL : VPseudoVROT_VV_VX; 533 defm PseudoVROR : VPseudoVROT_VV_VX_VI; 534} // Predicates = [HasStdExtZvkb] 535 536let Predicates = [HasStdExtZvkg] in { 537 defm PseudoVGHSH : VPseudoVGHSH; 538 defm PseudoVGMUL : VPseudoVGMUL; 539} // Predicates = [HasStdExtZvkg] 540 541let Predicates = [HasStdExtZvkned] in { 542 defm PseudoVAESDF : VPseudoVAESMV; 543 defm PseudoVAESDM : VPseudoVAESMV; 544 defm PseudoVAESEF : VPseudoVAESMV; 545 defm PseudoVAESEM : VPseudoVAESMV; 546 defm PseudoVAESKF1 : VPseudoVAESKF1; 547 defm PseudoVAESKF2 : VPseudoVAESKF2; 548 defm PseudoVAESZ : VPseudoVAESZ; 549} // Predicates = [HasStdExtZvkned] 550 551let Predicates = [HasStdExtZvknhaOrZvknhb] in { 552 defm PseudoVSHA2CH : VPseudoVSHA2CH; 553 defm PseudoVSHA2CL : VPseudoVSHA2CL; 554 defm PseudoVSHA2MS : VPseudoVSHA2MS; 555} // Predicates = [HasStdExtZvknhaOrZvknhb] 556 557let Predicates = [HasStdExtZvksed] in { 558 defm PseudoVSM4K : VPseudoVSM4K; 559 defm PseudoVSM4R : VPseudoVSM4R; 560} // Predicates = [HasStdExtZvksed] 561 562let Predicates = [HasStdExtZvksh] in { 563 defm PseudoVSM3C : VPseudoVSM3C; 564 defm PseudoVSM3ME : VPseudoVSM3ME; 565} // Predicates = [HasStdExtZvksh] 566 567//===----------------------------------------------------------------------===// 568// SDNode patterns 569//===----------------------------------------------------------------------===// 570 571multiclass VPatUnarySDNode_V<SDPatternOperator op, string instruction_name, 572 Predicate predicate = HasStdExtZvbb> { 573 foreach vti = AllIntegerVectors in { 574 let Predicates = !listconcat([predicate], 575 GetVTypePredicates<vti>.Predicates) in { 576 def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1))), 577 (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX) 578 (vti.Vector (IMPLICIT_DEF)), 579 vti.RegClass:$rs1, 580 vti.AVL, vti.Log2SEW, TA_MA)>; 581 } 582 } 583} 584 585// Helpers for detecting splats since we preprocess splat_vector to vmv.v.x 586// This should match the logic in RISCVDAGToDAGISel::selectVSplat 587def riscv_splat_vector : PatFrag<(ops node:$rs1), 588 (riscv_vmv_v_x_vl undef, node:$rs1, srcvalue)>; 589def riscv_vnot : PatFrag<(ops node:$rs1), (xor node:$rs1, 590 (riscv_splat_vector -1))>; 591 592foreach vti = AllIntegerVectors in { 593 let Predicates = !listconcat([HasStdExtZvkb], 594 GetVTypePredicates<vti>.Predicates) in { 595 def : Pat<(vti.Vector (and (riscv_vnot vti.RegClass:$rs1), 596 vti.RegClass:$rs2)), 597 (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX) 598 (vti.Vector (IMPLICIT_DEF)), 599 vti.RegClass:$rs2, 600 vti.RegClass:$rs1, 601 vti.AVL, vti.Log2SEW, TA_MA)>; 602 def : Pat<(vti.Vector (and (riscv_splat_vector 603 (not vti.ScalarRegClass:$rs1)), 604 vti.RegClass:$rs2)), 605 (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX) 606 (vti.Vector (IMPLICIT_DEF)), 607 vti.RegClass:$rs2, 608 vti.ScalarRegClass:$rs1, 609 vti.AVL, vti.Log2SEW, TA_MA)>; 610 } 611} 612 613defm : VPatUnarySDNode_V<bitreverse, "PseudoVBREV">; 614defm : VPatUnarySDNode_V<bswap, "PseudoVREV8", HasStdExtZvkb>; 615defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">; 616defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">; 617defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">; 618 619defm : VPatBinarySDNode_VV_VX<rotl, "PseudoVROL">; 620 621// Invert the immediate and mask it to SEW for readability. 622def InvRot8Imm : SDNodeXForm<imm, [{ 623 return CurDAG->getTargetConstant(0x7 & (64 - N->getZExtValue()), SDLoc(N), 624 N->getValueType(0)); 625}]>; 626def InvRot16Imm : SDNodeXForm<imm, [{ 627 return CurDAG->getTargetConstant(0xf & (64 - N->getZExtValue()), SDLoc(N), 628 N->getValueType(0)); 629}]>; 630def InvRot32Imm : SDNodeXForm<imm, [{ 631 return CurDAG->getTargetConstant(0x1f & (64 - N->getZExtValue()), SDLoc(N), 632 N->getValueType(0)); 633}]>; 634def InvRot64Imm : SDNodeXForm<imm, [{ 635 return CurDAG->getTargetConstant(0x3f & (64 - N->getZExtValue()), SDLoc(N), 636 N->getValueType(0)); 637}]>; 638 639// Although there is no vrol.vi, an immediate rotate left can be achieved by 640// negating the immediate in vror.vi 641foreach vti = AllIntegerVectors in { 642 let Predicates = !listconcat([HasStdExtZvkb], 643 GetVTypePredicates<vti>.Predicates) in { 644 def : Pat<(vti.Vector (rotl vti.RegClass:$rs2, 645 (vti.Vector (SplatPat_uimm6 uimm6:$rs1)))), 646 (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX) 647 (vti.Vector (IMPLICIT_DEF)), 648 vti.RegClass:$rs2, 649 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1), 650 vti.AVL, vti.Log2SEW, TA_MA)>; 651 } 652} 653defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>; 654 655foreach vtiToWti = AllWidenableIntVectors in { 656 defvar vti = vtiToWti.Vti; 657 defvar wti = vtiToWti.Wti; 658 let Predicates = !listconcat([HasStdExtZvbb], 659 GetVTypePredicates<vti>.Predicates, 660 GetVTypePredicates<wti>.Predicates) in { 661 def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))), 662 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))), 663 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX) 664 (wti.Vector (IMPLICIT_DEF)), 665 vti.RegClass:$rs2, vti.RegClass:$rs1, 666 vti.AVL, vti.Log2SEW, TA_MA)>; 667 668 def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))), 669 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))), 670 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX) 671 (wti.Vector (IMPLICIT_DEF)), 672 vti.RegClass:$rs2, GPR:$rs1, 673 vti.AVL, vti.Log2SEW, TA_MA)>; 674 675 def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))), 676 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), 677 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX) 678 (wti.Vector (IMPLICIT_DEF)), 679 vti.RegClass:$rs2, uimm5:$rs1, 680 vti.AVL, vti.Log2SEW, TA_MA)>; 681 } 682} 683 684//===----------------------------------------------------------------------===// 685// VL patterns 686//===----------------------------------------------------------------------===// 687 688multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name, 689 Predicate predicate = HasStdExtZvbb> { 690 foreach vti = AllIntegerVectors in { 691 let Predicates = !listconcat([predicate], 692 GetVTypePredicates<vti>.Predicates) in { 693 def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1), 694 (vti.Vector vti.RegClass:$merge), 695 (vti.Mask V0), 696 VLOpFrag)), 697 (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK") 698 vti.RegClass:$merge, 699 vti.RegClass:$rs1, 700 (vti.Mask V0), 701 GPR:$vl, 702 vti.Log2SEW, 703 TAIL_AGNOSTIC)>; 704 } 705 } 706} 707 708foreach vti = AllIntegerVectors in { 709 let Predicates = !listconcat([HasStdExtZvkb], 710 GetVTypePredicates<vti>.Predicates) in { 711 def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl 712 (vti.Vector vti.RegClass:$rs1), 713 (riscv_splat_vector -1), 714 (vti.Vector vti.RegClass:$merge), 715 (vti.Mask V0), 716 VLOpFrag), 717 (vti.Vector vti.RegClass:$rs2), 718 (vti.Vector vti.RegClass:$merge), 719 (vti.Mask V0), 720 VLOpFrag)), 721 (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK") 722 vti.RegClass:$merge, 723 vti.RegClass:$rs2, 724 vti.RegClass:$rs1, 725 (vti.Mask V0), 726 GPR:$vl, 727 vti.Log2SEW, 728 TAIL_AGNOSTIC)>; 729 730 def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector 731 (not vti.ScalarRegClass:$rs1)), 732 (vti.Vector vti.RegClass:$rs2), 733 (vti.Vector vti.RegClass:$merge), 734 (vti.Mask V0), 735 VLOpFrag)), 736 (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK") 737 vti.RegClass:$merge, 738 vti.RegClass:$rs2, 739 vti.ScalarRegClass:$rs1, 740 (vti.Mask V0), 741 GPR:$vl, 742 vti.Log2SEW, 743 TAIL_AGNOSTIC)>; 744 } 745} 746 747defm : VPatUnaryVL_V<riscv_bitreverse_vl, "PseudoVBREV">; 748defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8", HasStdExtZvkb>; 749defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">; 750defm : VPatUnaryVL_V<riscv_cttz_vl, "PseudoVCTZ">; 751defm : VPatUnaryVL_V<riscv_ctpop_vl, "PseudoVCPOP">; 752 753defm : VPatBinaryVL_VV_VX<riscv_rotl_vl, "PseudoVROL">; 754// Although there is no vrol.vi, an immediate rotate left can be achieved by 755// negating the immediate in vror.vi 756foreach vti = AllIntegerVectors in { 757 let Predicates = !listconcat([HasStdExtZvkb], 758 GetVTypePredicates<vti>.Predicates) in { 759 def : Pat<(riscv_rotl_vl vti.RegClass:$rs2, 760 (vti.Vector (SplatPat_uimm6 uimm6:$rs1)), 761 (vti.Vector vti.RegClass:$merge), 762 (vti.Mask V0), VLOpFrag), 763 (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK") 764 vti.RegClass:$merge, 765 vti.RegClass:$rs2, 766 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1), 767 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 768 } 769} 770defm : VPatBinaryVL_VV_VX_VI<riscv_rotr_vl, "PseudoVROR", uimm6>; 771 772foreach vtiToWti = AllWidenableIntVectors in { 773 defvar vti = vtiToWti.Vti; 774 defvar wti = vtiToWti.Wti; 775 let Predicates = !listconcat([HasStdExtZvbb], 776 GetVTypePredicates<vti>.Predicates, 777 GetVTypePredicates<wti>.Predicates) in { 778 def : Pat<(riscv_shl_vl 779 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))), 780 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))), 781 (wti.Vector wti.RegClass:$merge), 782 (vti.Mask V0), VLOpFrag), 783 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK") 784 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 785 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 786 787 def : Pat<(riscv_shl_vl 788 (wti.Vector (riscv_zext_vl_oneuse 789 (vti.Vector vti.RegClass:$rs2), 790 (vti.Mask V0), VLOpFrag)), 791 (wti.Vector (riscv_ext_vl_oneuse 792 (vti.Vector vti.RegClass:$rs1), 793 (vti.Mask V0), VLOpFrag)), 794 (wti.Vector wti.RegClass:$merge), 795 (vti.Mask V0), VLOpFrag), 796 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK") 797 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 798 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 799 800 def : Pat<(riscv_shl_vl 801 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))), 802 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))), 803 (wti.Vector wti.RegClass:$merge), 804 (vti.Mask V0), VLOpFrag), 805 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK") 806 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 807 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 808 809 def : Pat<(riscv_shl_vl 810 (wti.Vector (riscv_zext_vl_oneuse 811 (vti.Vector vti.RegClass:$rs2), 812 (vti.Mask V0), VLOpFrag)), 813 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))), 814 (wti.Vector wti.RegClass:$merge), 815 (vti.Mask V0), VLOpFrag), 816 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK") 817 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 818 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 819 820 def : Pat<(riscv_shl_vl 821 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))), 822 (wti.Vector (SplatPat_uimm5 uimm5:$rs1)), 823 (wti.Vector wti.RegClass:$merge), 824 (vti.Mask V0), VLOpFrag), 825 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK") 826 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1, 827 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 828 829 def : Pat<(riscv_shl_vl 830 (wti.Vector (riscv_zext_vl_oneuse 831 (vti.Vector vti.RegClass:$rs2), 832 (vti.Mask V0), VLOpFrag)), 833 (wti.Vector (SplatPat_uimm5 uimm5:$rs1)), 834 (wti.Vector wti.RegClass:$merge), 835 (vti.Mask V0), VLOpFrag), 836 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK") 837 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1, 838 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 839 840 def : Pat<(riscv_vwsll_vl 841 (vti.Vector vti.RegClass:$rs2), 842 (vti.Vector vti.RegClass:$rs1), 843 (wti.Vector wti.RegClass:$merge), 844 (vti.Mask V0), VLOpFrag), 845 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK") 846 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 847 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 848 849 def : Pat<(riscv_vwsll_vl 850 (vti.Vector vti.RegClass:$rs2), 851 (vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))), 852 (wti.Vector wti.RegClass:$merge), 853 (vti.Mask V0), VLOpFrag), 854 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK") 855 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 856 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 857 858 def : Pat<(riscv_vwsll_vl 859 (vti.Vector vti.RegClass:$rs2), 860 (vti.Vector (SplatPat_uimm5 uimm5:$rs1)), 861 (wti.Vector wti.RegClass:$merge), 862 (vti.Mask V0), VLOpFrag), 863 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK") 864 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1, 865 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 866 } 867} 868 869//===----------------------------------------------------------------------===// 870// Codegen patterns 871//===----------------------------------------------------------------------===// 872 873class VPatUnaryNoMask_Zvk<string intrinsic_name, 874 string inst, 875 string kind, 876 ValueType result_type, 877 ValueType op2_type, 878 int sew, 879 LMULInfo vlmul, 880 VReg result_reg_class, 881 VReg op2_reg_class> : 882 Pat<(result_type (!cast<Intrinsic>(intrinsic_name) 883 (result_type result_reg_class:$rd), 884 (op2_type op2_reg_class:$rs2), 885 VLOpFrag, (XLenVT timm:$policy))), 886 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX) 887 (result_type result_reg_class:$rd), 888 (op2_type op2_reg_class:$rs2), 889 GPR:$vl, sew, (XLenVT timm:$policy))>; 890 891class VPatUnaryNoMask_VS_Zvk<string intrinsic_name, 892 string inst, 893 string kind, 894 ValueType result_type, 895 ValueType op2_type, 896 int sew, 897 LMULInfo vlmul, 898 LMULInfo vs2_lmul, 899 VReg result_reg_class, 900 VReg op2_reg_class> : 901 Pat<(result_type (!cast<Intrinsic>(intrinsic_name) 902 (result_type result_reg_class:$rd), 903 (op2_type op2_reg_class:$rs2), 904 VLOpFrag, (XLenVT timm:$policy))), 905 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX) 906 (result_type result_reg_class:$rd), 907 (op2_type op2_reg_class:$rs2), 908 GPR:$vl, sew, (XLenVT timm:$policy))>; 909 910multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction, 911 list<VTypeInfo> vtilist> { 912 foreach vti = vtilist in 913 def : VPatUnaryNoMask_Zvk<intrinsic # "_vv", instruction, "VV", 914 vti.Vector, vti.Vector, vti.Log2SEW, 915 vti.LMul, vti.RegClass, vti.RegClass>; 916} 917 918multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction, 919 list<VTypeInfo> vtilist> { 920 foreach vti = vtilist in 921 foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in 922 def : VPatUnaryNoMask_VS_Zvk<intrinsic # "_vs", instruction, "VS", 923 vti.Vector, vti_vs2.Vector, vti.Log2SEW, 924 vti.LMul, vti_vs2.LMul, vti.RegClass, vti_vs2.RegClass>; 925} 926 927multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction, 928 list<VTypeInfo> vtilist> { 929 defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>; 930 defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>; 931} 932 933multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction, 934 list<VTypeInfo> vtilist> { 935 foreach vti = vtilist in 936 def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV", 937 vti.Vector, vti.Vector, vti.Vector, 938 vti.Log2SEW, vti.LMul, vti.RegClass, 939 vti.RegClass, vti.RegClass>; 940} 941 942multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction, 943 list<VTypeInfo> vtilist, 944 Operand imm_type = tuimm5> { 945 foreach vti = vtilist in 946 def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI", 947 vti.Vector, vti.Vector, XLenVT, 948 vti.Log2SEW, vti.LMul, vti.RegClass, 949 vti.RegClass, imm_type>; 950} 951 952multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction, 953 list<VTypeInfo> vtilist, 954 Operand imm_type = tuimm5> { 955 foreach vti = vtilist in 956 def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX, 957 vti.Vector, vti.Vector, XLenVT, vti.Log2SEW, 958 vti.RegClass, vti.RegClass, imm_type>; 959} 960 961multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction, 962 list<VTypeInfo> vtilist> { 963 foreach vti = vtilist in 964 def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX, 965 vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW, 966 vti.RegClass, vti.RegClass, vti.RegClass>; 967} 968 969multiclass VPatBinaryV_VX_VROTATE<string intrinsic, string instruction, 970 list<VTypeInfo> vtilist, bit isSEWAware = 0> { 971 foreach vti = vtilist in { 972 defvar kind = "V"#vti.ScalarSuffix; 973 let Predicates = GetVTypePredicates<vti>.Predicates in 974 defm : VPatBinary<intrinsic, 975 !if(isSEWAware, 976 instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW, 977 instruction#"_"#kind#"_"#vti.LMul.MX), 978 vti.Vector, vti.Vector, XLenVT, vti.Mask, 979 vti.Log2SEW, vti.RegClass, 980 vti.RegClass, vti.ScalarRegClass>; 981 } 982} 983 984multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction, 985 list<VTypeInfo> vtilist, bit isSEWAware = 0> { 986 foreach vti = vtilist in { 987 defvar Intr = !cast<Intrinsic>(intrinsic); 988 defvar Pseudo = !cast<Instruction>( 989 !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW, 990 instruction#"_VI_"#vti.LMul.MX)); 991 let Predicates = GetVTypePredicates<vti>.Predicates in 992 def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$merge), 993 (vti.Vector vti.RegClass:$rs2), 994 (XLenVT uimm6:$rs1), 995 VLOpFrag)), 996 (Pseudo (vti.Vector vti.RegClass:$merge), 997 (vti.Vector vti.RegClass:$rs2), 998 (InvRot64Imm uimm6:$rs1), 999 GPR:$vl, vti.Log2SEW, TU_MU)>; 1000 1001 defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask"); 1002 defvar PseudoMask = !cast<Instruction>( 1003 !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK", 1004 instruction#"_VI_"#vti.LMul.MX#"_MASK")); 1005 let Predicates = GetVTypePredicates<vti>.Predicates in 1006 def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$merge), 1007 (vti.Vector vti.RegClass:$rs2), 1008 (XLenVT uimm6:$rs1), 1009 (vti.Mask V0), 1010 VLOpFrag, (XLenVT timm:$policy))), 1011 (PseudoMask (vti.Vector vti.RegClass:$merge), 1012 (vti.Vector vti.RegClass:$rs2), 1013 (InvRot64Imm uimm6:$rs1), 1014 (vti.Mask V0), 1015 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1016 } 1017} 1018 1019multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction, 1020 string instruction2, list<VTypeInfo> vtilist> 1021 : VPatBinaryV_VV<intrinsic, instruction, vtilist>, 1022 VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>, 1023 VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>; 1024 1025multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction, 1026 list<VTypeInfo> vtilist> 1027 : VPatBinaryV_VV<intrinsic, instruction, vtilist>, 1028 VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>, 1029 VPatBinaryV_VI<intrinsic, instruction, vtilist, uimm6>; 1030 1031multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction, 1032 list<VTypeInfoToWide> vtilist> 1033 : VPatBinaryW_VV<intrinsic, instruction, vtilist> { 1034 foreach VtiToWti = vtilist in { 1035 defvar Vti = VtiToWti.Vti; 1036 defvar Wti = VtiToWti.Wti; 1037 defvar kind = "V"#Vti.ScalarSuffix; 1038 let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates, 1039 GetVTypePredicates<Wti>.Predicates) in { 1040 defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX, 1041 Wti.Vector, Vti.Vector, XLenVT, Vti.Mask, 1042 Vti.Log2SEW, Wti.RegClass, 1043 Vti.RegClass, Vti.ScalarRegClass>; 1044 defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX, 1045 Wti.Vector, Vti.Vector, XLenVT, Vti.Mask, 1046 Vti.Log2SEW, Wti.RegClass, 1047 Vti.RegClass, uimm5>; 1048 } 1049 } 1050} 1051 1052let Predicates = [HasStdExtZvbb] in { 1053 defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>; 1054 defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>; 1055 defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>; 1056 defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>; 1057 defm : VPatBinaryW_VV_VX_VI_VWSLL<"int_riscv_vwsll", "PseudoVWSLL", AllWidenableIntVectors>; 1058} // Predicates = [HasStdExtZvbb] 1059 1060let Predicates = [HasStdExtZvbc] in { 1061 defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL", I64IntegerVectors>; 1062 defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH", I64IntegerVectors>; 1063} // Predicates = [HasStdExtZvbc] 1064 1065let Predicates = [HasStdExtZvkb] in { 1066 defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>; 1067 defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>; 1068 defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>; 1069 defm : VPatBinaryV_VV_VX_VROL<"int_riscv_vrol", "PseudoVROL", "PseudoVROR", AllIntegerVectors>; 1070 defm : VPatBinaryV_VV_VX_VI_VROR<"int_riscv_vror", "PseudoVROR", AllIntegerVectors>; 1071} // Predicates = [HasStdExtZvkb] 1072 1073let Predicates = [HasStdExtZvkg] in { 1074 defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH", I32IntegerVectors>; 1075 defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL", I32IntegerVectors>; 1076} // Predicates = [HasStdExtZvkg] 1077 1078let Predicates = [HasStdExtZvkned] in { 1079 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF", I32IntegerVectors>; 1080 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM", I32IntegerVectors>; 1081 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF", I32IntegerVectors>; 1082 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM", I32IntegerVectors>; 1083 defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vaeskf1", "PseudoVAESKF1", I32IntegerVectors>; 1084 defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2", I32IntegerVectors>; 1085 defm : VPatUnaryV_S_NoMaskVectorCrypto<"int_riscv_vaesz", "PseudoVAESZ", I32IntegerVectors>; 1086} // Predicates = [HasStdExtZvkned] 1087 1088let Predicates = [HasStdExtZvknha] in { 1089 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32IntegerVectors>; 1090 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CH", I32IntegerVectors>; 1091 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32IntegerVectors>; 1092} // Predicates = [HasStdExtZvknha] 1093 1094let Predicates = [HasStdExtZvknhb] in { 1095 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32I64IntegerVectors>; 1096 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CH", I32I64IntegerVectors>; 1097 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32I64IntegerVectors>; 1098} // Predicates = [HasStdExtZvknhb] 1099 1100let Predicates = [HasStdExtZvksed] in { 1101 defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vsm4k", "PseudoVSM4K", I32IntegerVectors>; 1102 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R", I32IntegerVectors>; 1103} // Predicates = [HasStdExtZvksed] 1104 1105let Predicates = [HasStdExtZvksh] in { 1106 defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C", I32IntegerVectors>; 1107 defm : VPatBinaryV_VV_NoMaskTU<"int_riscv_vsm3me", "PseudoVSM3ME", I32IntegerVectors>; 1108} // Predicates = [HasStdExtZvksh] 1109