//===-- RISCVInstrInfoZvk.td - RISC-V 'Zvk' instructions ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'Zvk',
// Vector Cryptography Instructions extension, version Release 1.0.0.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def tuimm5 : RISCVOp, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]>;

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
multiclass VCLMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV">;
  def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX">;
}

class RVInstIVI_VROR<bits<6> funct6, dag outs, dag ins, string opcodestr,
                     string argstr>
    : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
  bits<5> vs2;
  bits<6> imm;
  bits<5> vd;
  bit vm;

  let Inst{31-27} = funct6{5-1};
  let Inst{26} = imm{5};
  let Inst{25} = vm;
  let Inst{24-20} = vs2;
  let Inst{19-15} = imm{4-0};
  let Inst{14-12} = OPIVI.Value;
  let Inst{11-7} = vd;
  let Inst{6-0} = OPC_OP_V.Value;

  let Uses = [VTYPE, VL];
  let RVVConstraint = VMConstraint;
}

multiclass VROR_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_V_X<opcodestr, funct6> {
  def I : RVInstIVI_VROR<funct6, (outs VR:$vd),
                         (ins VR:$vs2, uimm6:$imm, VMaskOp:$vm),
                         opcodestr # ".vi", "$vd, $vs2, $imm$vm">,
          SchedUnaryMC<"WriteVRotI", "ReadVRotV">;
}

// op vd, vs2, vs1
class PALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : VALUVVNoVm<funct6, opv, opcodestr> {
  let Inst{6-0} = OPC_OP_VE.Value;
}

// op vd, vs2, vs1
class PALUVVNoVmTernary<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd_wb),
               (ins VR:$vd, VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let Constraints = "$vd = $vd_wb";
  let vm = 1;
  let Inst{6-0} = OPC_OP_VE.Value;
}

// op vd, vs2, imm
class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype>
    : VALUVINoVm<funct6, opcodestr, optype> {
  let Inst{6-0} = OPC_OP_VE.Value;
  let Inst{14-12} = OPMVV.Value;
}

// op vd, vs2, imm where vd is also a source regardless of tail policy
class PALUVINoVmBinary<bits<6> funct6, string opcodestr, Operand optype>
    : RVInstIVI<funct6, (outs VR:$vd_wb),
                (ins VR:$vd, VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let Constraints = "$vd = $vd_wb";
  let vm = 1;
  let Inst{6-0} = OPC_OP_VE.Value;
  let Inst{14-12} = OPMVV.Value;
}

// op vd, vs2 (use vs1 as instruction encoding) where vd is also a source
// regardless of tail policy
class PALUVs2NoVmBinary<bits<6> funct6, bits<5> vs1, RISCVVFormat opv,
                        string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd_wb), (ins VR:$vd, VR:$vs2),
              opcodestr, "$vd, $vs2"> {
  let Constraints = "$vd = $vd_wb";
  let vm = 1;
  let Inst{6-0} = OPC_OP_VE.Value;
}

multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1,
                       RISCVVFormat opv, string opcodestr> {
  let RVVConstraint = NoConstraint in
  def NAME # _VV : PALUVs2NoVmBinary<funct6_vv, vs1, opv, opcodestr # ".vv">,
                   SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">;
  let RVVConstraint = VS2Constraint in
  def NAME # _VS : PALUVs2NoVmBinary<funct6_vs, vs1, opv, opcodestr # ".vs">,
                   SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
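
// Illustrative note (not from the upstream file): each VAES_MV_V_S instance
// below expands to a pair of unmasked encodings that reuse vd as a source
// (Constraints = "$vd = $vd_wb"). For example,
//   defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">;
// produces VAESEF_VV ("vaesef.vv", funct6 = 0b101000) and
// VAESEF_VS ("vaesef.vs", funct6 = 0b101001), both with vs1 field 0b00011.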

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZvbb] in {
  def VBREV_V : VALUVs2<0b010010, 0b01010, OPMVV, "vbrev.v">;
  def VCLZ_V  : VALUVs2<0b010010, 0b01100, OPMVV, "vclz.v">;
  def VCPOP_V : VALUVs2<0b010010, 0b01110, OPMVV, "vcpop.v">;
  def VCTZ_V  : VALUVs2<0b010010, 0b01101, OPMVV, "vctz.v">;
  let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in
  defm VWSLL_V : VSHT_IV_V_X_I<"vwsll", 0b110101>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm VCLMUL_V  : VCLMUL_MV_V_X<"vclmul", 0b001100>;
  defm VCLMULH_V : VCLMUL_MV_V_X<"vclmulh", 0b001101>;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm VANDN_V : VALU_IV_V_X<"vandn", 0b000001>;
  def VBREV8_V : VALUVs2<0b010010, 0b01000, OPMVV, "vbrev8.v">;
  def VREV8_V  : VALUVs2<0b010010, 0b01001, OPMVV, "vrev8.v">;
  defm VROL_V  : VALU_IV_V_X<"vrol", 0b010101>;
  defm VROR_V  : VROR_IV_V_X_I<"vror", 0b010100>;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg], RVVConstraint = NoConstraint in {
  def VGHSH_VV : PALUVVNoVmTernary<0b101100, OPMVV, "vghsh.vv">,
                 SchedTernaryMC<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
                                "ReadVGHSHV">;
  def VGMUL_VV : PALUVs2NoVmBinary<0b101000, 0b10001, OPMVV, "vgmul.vv">,
                 SchedBinaryMC<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV">;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = Sha2Constraint in {
  def VSHA2CH_VV : PALUVVNoVmTernary<0b101110, OPMVV, "vsha2ch.vv">,
                   SchedTernaryMC<"WriteVSHA2CHV", "ReadVSHA2CHV",
                                  "ReadVSHA2CHV", "ReadVSHA2CHV">;
  def VSHA2CL_VV : PALUVVNoVmTernary<0b101111, OPMVV, "vsha2cl.vv">,
                   SchedTernaryMC<"WriteVSHA2CLV", "ReadVSHA2CLV",
                                  "ReadVSHA2CLV", "ReadVSHA2CLV">;
  def VSHA2MS_VV : PALUVVNoVmTernary<0b101101, OPMVV, "vsha2ms.vv">,
                   SchedTernaryMC<"WriteVSHA2MSV", "ReadVSHA2MSV",
                                  "ReadVSHA2MSV", "ReadVSHA2MSV">;
} // Predicates = [HasStdExtZvknhaOrZvknhb]

let Predicates = [HasStdExtZvkned] in {
  defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">;
  defm VAESDM : VAES_MV_V_S<0b101000, 0b101001, 0b00000, OPMVV, "vaesdm">;
  defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">;
  defm VAESEM : VAES_MV_V_S<0b101000, 0b101001, 0b00010, OPMVV, "vaesem">;
  def VAESKF1_VI : PALUVINoVm<0b100010, "vaeskf1.vi", uimm5>,
                   SchedUnaryMC<"WriteVAESKF1V", "ReadVAESKF1V">;
  def VAESKF2_VI : PALUVINoVmBinary<0b101010, "vaeskf2.vi", uimm5>,
                   SchedBinaryMC<"WriteVAESKF2V", "ReadVAESKF2V",
                                 "ReadVAESKF2V">;
  let RVVConstraint = VS2Constraint in
  def VAESZ_VS : PALUVs2NoVmBinary<0b101001, 0b00111, OPMVV, "vaesz.vs">,
                 SchedBinaryMC<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV">;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvksed] in {
  let RVVConstraint = NoConstraint in
  def VSM4K_VI : PALUVINoVm<0b100001, "vsm4k.vi", uimm5>,
                 SchedUnaryMC<"WriteVSM4KV", "ReadVSM4KV">;
  defm VSM4R : VAES_MV_V_S<0b101000, 0b101001, 0b10000, OPMVV, "vsm4r">;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh], RVVConstraint = VS2Constraint in {
  def VSM3C_VI : PALUVINoVmBinary<0b101011, "vsm3c.vi", uimm5>,
                 SchedBinaryMC<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV">;
  def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">,
                  SchedUnaryMC<"WriteVSM3MEV", "ReadVSM3MEV">;
} // Predicates = [HasStdExtZvksh]

//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//

defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32));
defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors,
                                      !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)));

class ZvkI32IntegerVectors<string vd_lmul> {
  list<VTypeInfo> vs2_types = !cond(
    !eq(vd_lmul, "M8")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
    !eq(vd_lmul, "M4")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
    !eq(vd_lmul, "M2")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 16)),
    !eq(vd_lmul, "M1")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 8)),
    !eq(vd_lmul, "MF2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 4)),
    !eq(vd_lmul, "MF4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 2)),
    !eq(vd_lmul, "MF8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 1)));
}

class ZvkMxSet<string vd_lmul> {
  list<LMULInfo> vs2_lmuls = !cond(
    !eq(vd_lmul, "M8")  : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
    !eq(vd_lmul, "M4")  : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
    !eq(vd_lmul, "M2")  : [V_MF8, V_MF4, V_MF2, V_M1, V_M2],
    !eq(vd_lmul, "M1")  : [V_MF8, V_MF4, V_MF2, V_M1],
    !eq(vd_lmul, "MF2") : [V_MF8, V_MF4, V_MF2],
    !eq(vd_lmul, "MF4") : [V_MF8, V_MF4],
    !eq(vd_lmul, "MF8") : [V_MF8]);
}

class VPseudoBinaryNoMask_Zvk<VReg RetClass, VReg OpClass> :
      Pseudo<(outs RetClass:$rd_wb),
             (ins RetClass:$rd, OpClass:$rs2, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd_wb = $rd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoTernaryNoMask_Zvk<VReg RetClass, VReg Op1Class,
                               DAGOperand Op2Class> :
      Pseudo<(outs RetClass:$rd_wb),
             (ins RetClass:$rd, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "$rd_wb = $rd";
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

multiclass VPseudoBinaryNoMaskPolicy_Zvk<VReg RetClass, VReg Op1Class,
                                         DAGOperand Op2Class, LMULInfo MInfo,
                                         string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
                                                   Constraint>;
  }
}

multiclass VPseudoTernaryNoMask_Zvk<VReg RetClass, VReg Op1Class,
                                    DAGOperand Op2Class, LMULInfo MInfo> {
  let VLMul = MInfo.value in
  def "_" # MInfo.MX : VPseudoTernaryNoMask_Zvk<RetClass, Op1Class, Op2Class>;
}

multiclass VPseudoBinaryV_V_NoMask_Zvk<LMULInfo m> {
  let VLMul = m.value in {
    def "_VV_" # m.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass>;
  }
}

multiclass VPseudoBinaryV_S_NoMask_Zvk<LMULInfo m> {
  let VLMul = m.value in
  foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in
  def "_VS_" # m.MX # "_" # vs2_lmul.MX
      : VPseudoBinaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass>;
}

multiclass VPseudoVGMUL {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
              SchedBinary<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV", mx>;
  }
}

multiclass VPseudoVAESMV {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
              SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>;
    defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
              SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>;
  }
}

multiclass VPseudoVSM4R {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
              SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>;
    defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
              SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>;
  }
}

multiclass VPseudoVGHSH {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedTernary<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
                            "ReadVGHSHV", mx>;
  }
}

multiclass VPseudoVSHA2CH {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedTernary<"WriteVSHA2CHV", "ReadVSHA2CHV", "ReadVSHA2CHV",
                            "ReadVSHA2CHV", mx>;
  }
}

multiclass VPseudoVSHA2CL {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedTernary<"WriteVSHA2CLV", "ReadVSHA2CLV", "ReadVSHA2CLV",
                            "ReadVSHA2CLV", mx>;
  }
}

multiclass VPseudoVSHA2MS {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedTernary<"WriteVSHA2MSV", "ReadVSHA2MSV", "ReadVSHA2MSV",
                            "ReadVSHA2MSV", mx>;
  }
}

multiclass VPseudoVAESKF1 {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedBinary<"WriteVAESKF1V", "ReadVAESKF1V", "ReadVAESKF1V", mx,
                           forceMergeOpRead=true>;
  }
}

multiclass VPseudoVAESKF2 {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedTernary<"WriteVAESKF2V", "ReadVAESKF2V", "ReadVAESKF2V",
                            "ReadVAESKF2V", mx>;
  }
}

multiclass VPseudoVAESZ {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
              SchedBinary<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV", mx>;
  }
}

multiclass VPseudoVSM3C {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedTernary<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV",
                            "ReadVSM3CV", mx>;
  }
}

multiclass VPseudoVSM4K {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
               SchedBinary<"WriteVSM4KV", "ReadVSM4KV", "ReadVSM4KV", mx,
                           forceMergeOpRead=true>;
  }
}

multiclass VPseudoVSM3ME {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defm _VV : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
               SchedBinary<"WriteVSM3MEV", "ReadVSM3MEV", "ReadVSM3MEV", mx,
                           forceMergeOpRead=true>;
  }
}

multiclass VPseudoVCLMUL_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryV_VV<m>,
              SchedBinary<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX", mx,
                          forceMergeOpRead=true>;
  }
}

multiclass VPseudoUnaryV_V<LMULInfo m> {
  let VLMul = m.value in {
    defvar suffix = "_V_" # m.MX;
    def suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
    def suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                           RISCVMaskedPseudo<MaskIdx=2>;
  }
}

multiclass VPseudoVBREV {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVBREVV", "ReadVBREVV", mx, forceMergeOpRead=true>;
  }
}

multiclass VPseudoVCLZ {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVCLZV", "ReadVCLZV", mx, forceMergeOpRead=true>;
  }
}

multiclass VPseudoVCTZ {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVCTZV", "ReadVCTZV", mx, forceMergeOpRead=true>;
  }
}

multiclass VPseudoVCPOP {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVCPOPV", "ReadVCPOPV", mx, forceMergeOpRead=true>;
  }
}

multiclass VPseudoVWSLL {
  foreach m = MxListW in {
    defvar mx = m.MX;
    defm "" : VPseudoBinaryW_VV<m>,
              SchedBinary<"WriteVWSLLV", "ReadVWSLLV", "ReadVWSLLV", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryW_VX<m>,
              SchedBinary<"WriteVWSLLX", "ReadVWSLLV", "ReadVWSLLX", mx,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryW_VI<uimm5, m>,
              SchedUnary<"WriteVWSLLI", "ReadVWSLLV", mx,
                         forceMergeOpRead=true>;
  }
}

multiclass VPseudoVANDN {
  foreach m = MxList in {
    defm "" : VPseudoBinaryV_VV<m>,
              SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
                          forceMergeOpRead=true>;
  }
}

multiclass VPseudoVBREV8 {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVBREV8V", "ReadVBREV8V", mx,
                         forceMergeOpRead=true>;
  }
}

multiclass VPseudoVREV8 {
  foreach m = MxList in {
    defvar mx = m.MX;
    defm "" : VPseudoUnaryV_V<m>,
              SchedUnary<"WriteVREV8V", "ReadVREV8V", mx, forceMergeOpRead=true>;
  }
}

multiclass VPseudoVROT_VV_VX {
  foreach m = MxList in {
    defm "" : VPseudoBinaryV_VV<m>,
              SchedBinary<"WriteVRotV", "ReadVRotV", "ReadVRotV", m.MX,
                          forceMergeOpRead=true>;
    defm "" : VPseudoBinaryV_VX<m>,
              SchedBinary<"WriteVRotX", "ReadVRotV", "ReadVRotX", m.MX,
                          forceMergeOpRead=true>;
  }
}

multiclass VPseudoVROT_VV_VX_VI : VPseudoVROT_VV_VX {
  foreach m = MxList in {
    defm "" : VPseudoBinaryV_VI<uimm6, m>,
              SchedUnary<"WriteVRotI", "ReadVRotV", m.MX,
                         forceMergeOpRead=true>;
  }
}

let Predicates = [HasStdExtZvbb] in {
  defm PseudoVBREV : VPseudoVBREV;
  defm PseudoVCLZ  : VPseudoVCLZ;
  defm PseudoVCTZ  : VPseudoVCTZ;
  defm PseudoVCPOP : VPseudoVCPOP;
  defm PseudoVWSLL : VPseudoVWSLL;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm PseudoVCLMUL  : VPseudoVCLMUL_VV_VX;
  defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm PseudoVANDN  : VPseudoVANDN;
  defm PseudoVBREV8 : VPseudoVBREV8;
  defm PseudoVREV8  : VPseudoVREV8;
  defm PseudoVROL   : VPseudoVROT_VV_VX;
  defm PseudoVROR   : VPseudoVROT_VV_VX_VI;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg] in {
  defm PseudoVGHSH : VPseudoVGHSH;
  defm PseudoVGMUL : VPseudoVGMUL;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvkned] in {
  defm PseudoVAESDF  : VPseudoVAESMV;
  defm PseudoVAESDM  : VPseudoVAESMV;
  defm PseudoVAESEF  : VPseudoVAESMV;
  defm PseudoVAESEM  : VPseudoVAESMV;
  defm PseudoVAESKF1 : VPseudoVAESKF1;
  defm PseudoVAESKF2 : VPseudoVAESKF2;
  defm PseudoVAESZ   : VPseudoVAESZ;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvknhaOrZvknhb] in {
  defm PseudoVSHA2CH : VPseudoVSHA2CH;
  defm PseudoVSHA2CL : VPseudoVSHA2CL;
  defm PseudoVSHA2MS : VPseudoVSHA2MS;
} // Predicates = [HasStdExtZvknhaOrZvknhb]

let Predicates = [HasStdExtZvksed] in {
  defm PseudoVSM4K : VPseudoVSM4K;
  defm PseudoVSM4R : VPseudoVSM4R;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh] in {
  defm PseudoVSM3C  : VPseudoVSM3C;
  defm PseudoVSM3ME : VPseudoVSM3ME;
} // Predicates = [HasStdExtZvksh]

//===----------------------------------------------------------------------===//
// SDNode patterns
//===----------------------------------------------------------------------===//

multiclass VPatUnarySDNode_V<SDPatternOperator op, string instruction_name,
                             Predicate predicate = HasStdExtZvbb> {
  foreach vti = AllIntegerVectors in {
    let Predicates = !listconcat([predicate],
                                 GetVTypePredicates<vti>.Predicates) in {
      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1))),
                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Helpers for detecting splats since we preprocess splat_vector to vmv.v.x
// This should match the logic in RISCVDAGToDAGISel::selectVSplat
def riscv_splat_vector : PatFrag<(ops node:$rs1),
                                 (riscv_vmv_v_x_vl undef, node:$rs1, srcvalue)>;

def riscv_vnot : PatFrag<(ops node:$rs1), (xor node:$rs1,
                                               (riscv_splat_vector -1))>;

foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (and (riscv_vnot vti.RegClass:$rs1),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (and (riscv_splat_vector
                                  (not vti.ScalarRegClass:$rs1)),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}
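
// Illustrative note (not from the upstream file): the two patterns above turn
// the generic and-not DAG forms into vandn, keeping the non-inverted operand
// in vs2 and the inverted one in vs1/rs1:
//   (and (xor X, (splat -1)), Y)   -> PseudoVANDN_VV  (vandn.vv vd, Y, X)
//   (and (splat (not xrs1)), Y)    -> PseudoVANDN_VX  (vandn.vx vd, Y, xrs1)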

defm : VPatUnarySDNode_V<bitreverse, "PseudoVBREV">;
defm : VPatUnarySDNode_V<bswap, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">;
defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">;
defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">;

defm : VPatBinarySDNode_VV_VX<rotl, "PseudoVROL">;

// Invert the immediate and mask it to SEW for readability.
def InvRot8Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x7 & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot16Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0xf & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot32Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x1f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot64Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x3f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
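
// Illustrative note (not from the upstream file): because every SEW
// (8/16/32/64) divides 64, masking (64 - imm) down to SEW bits is the same as
// computing (-imm) mod SEW, i.e. the equivalent right-rotate amount. For
// instance, a rotate-left by 8 at SEW=32 uses InvRot32Imm to become vror.vi
// with 0x1f & (64 - 8) = 24, and rotating left by 8 equals rotating right by
// 24 for 32-bit elements.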

// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (rotl vti.RegClass:$rs2,
                                (vti.Vector (SplatPat_uimm6 uimm6:$rs1)))),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}
defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>;

foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, GPR:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, uimm5:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

//===----------------------------------------------------------------------===//
// VL patterns
//===----------------------------------------------------------------------===//

multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
                         Predicate predicate = HasStdExtZvbb> {
  foreach vti = AllIntegerVectors in {
    let Predicates = !listconcat([predicate],
                                 GetVTypePredicates<vti>.Predicates) in {
      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
                                (vti.Vector vti.RegClass:$merge),
                                (vti.Mask V0),
                                VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge,
                   vti.RegClass:$rs1,
                   (vti.Mask V0),
                   GPR:$vl,
                   vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
    }
  }
}

foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
                                           (vti.Vector vti.RegClass:$rs1),
                                           (riscv_splat_vector -1),
                                           (vti.Vector vti.RegClass:$merge),
                                           (vti.Mask V0),
                                           VLOpFrag),
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$merge),
                                        (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;

    def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
                                           (not vti.ScalarRegClass:$rs1)),
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$merge),
                                        (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;
  }
}

defm : VPatUnaryVL_V<riscv_bitreverse_vl, "PseudoVBREV">;
defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">;
defm : VPatUnaryVL_V<riscv_cttz_vl, "PseudoVCTZ">;
defm : VPatUnaryVL_V<riscv_ctpop_vl, "PseudoVCPOP">;

defm : VPatBinaryVL_VV_VX<riscv_rotl_vl, "PseudoVROL">;

// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
                             (vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
                             (vti.Vector vti.RegClass:$merge),
                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
defm : VPatBinaryVL_VV_VX_VI<riscv_rotr_vl, "PseudoVROR", uimm6>;

foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(riscv_shl_vl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                            (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
                                           (vti.Vector vti.RegClass:$rs2),
                                           (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (riscv_ext_vl_oneuse
                                           (vti.Vector vti.RegClass:$rs1),
                                           (vti.Mask V0), VLOpFrag)),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                            (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
                                           (vti.Vector vti.RegClass:$rs2),
                                           (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                            (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
                                           (vti.Vector vti.RegClass:$rs2),
                                           (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector vti.RegClass:$rs1),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl
                 (vti.Vector vti.RegClass:$rs2),
                 (vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                 (wti.Vector wti.RegClass:$merge),
                 (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Codegen patterns
//===----------------------------------------------------------------------===//

class VPatUnaryNoMask_Zvk<string intrinsic_name,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op2_type,
                          int sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$rd),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (result_type result_reg_class:$rd),
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew, (XLenVT timm:$policy))>;

class VPatUnaryNoMask_VS_Zvk<string intrinsic_name,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             LMULInfo vs2_lmul,
                             VReg result_reg_class,
                             VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (result_type result_reg_class:$rd),
                   (op2_type op2_reg_class:$rs2),
                   VLOpFrag, (XLenVT timm:$policy))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX)
                   (result_type result_reg_class:$rd),
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew, (XLenVT timm:$policy))>;

multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatUnaryNoMask_Zvk<intrinsic # "_vv", instruction, "VV",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.RegClass, vti.RegClass>;
}

multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
                                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in
      def : VPatUnaryNoMask_VS_Zvk<intrinsic # "_vs", instruction, "VS",
                                   vti.Vector, vti_vs2.Vector, vti.Log2SEW,
                                   vti.LMul, vti_vs2.LMul, vti.RegClass,
                                   vti_vs2.RegClass>;
}

multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist> {
  defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>;
  defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>;
}

multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV",
                                      vti.Vector, vti.Vector, vti.Vector,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist,
                                 Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI",
                                      vti.Vector, vti.Vector, XLenVT,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist,
                                   Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, XLenVT, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VX_VROTATE<string intrinsic, string instruction,
                                  list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
      defm : VPatBinary<intrinsic,
                        !if(isSEWAware,
                            instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                            instruction#"_"#kind#"_"#vti.LMul.MX),
                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(
                      !if(isSEWAware,
                          instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
                          instruction#"_VI_"#vti.LMul.MX));
    let Predicates = GetVTypePredicates<vti>.Predicates in
      def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$merge),
                                  (vti.Vector vti.RegClass:$rs2),
                                  (XLenVT uimm6:$rs1),
                                  VLOpFrag)),
                (Pseudo (vti.Vector vti.RegClass:$merge),
                        (vti.Vector vti.RegClass:$rs2),
                        (InvRot64Imm uimm6:$rs1),
                        GPR:$vl, vti.Log2SEW, TU_MU)>;

    defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask");
    defvar PseudoMask = !cast<Instruction>(
                          !if(isSEWAware,
                              instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
                              instruction#"_VI_"#vti.LMul.MX#"_MASK"));
    let Predicates = GetVTypePredicates<vti>.Predicates in
      def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$merge),
                                      (vti.Vector vti.RegClass:$rs2),
                                      (XLenVT uimm6:$rs1),
                                      (vti.Mask V0),
                                      VLOpFrag, (XLenVT timm:$policy))),
                (PseudoMask (vti.Vector vti.RegClass:$merge),
                            (vti.Vector vti.RegClass:$rs2),
                            (InvRot64Imm uimm6:$rs1),
                            (vti.Mask V0),
                            GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  }
}
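
// Illustrative note (not from the upstream file): VPatBinaryV_VI_VROL above is
// what lets the vrol intrinsic's immediate form reuse the vror pseudos, so the
// VROL multiclass below takes two instruction names: "instruction" for the
// .vv/.vx forms and "instruction2" (the vror pseudo) for the
// inverted-immediate .vi form.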

multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
                                  string instruction2,
                                  list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;

multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist,
                                     Operand ImmType = uimm6>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
                                      list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in {
      defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, Vti.ScalarRegClass>;
      defm : VPatBinary<intrinsic, instruction#"_VI_"#Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, uimm5>;
    }
  }
}

let Predicates = [HasStdExtZvbb] in {
  defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>;
  defm : VPatBinaryW_VV_VX_VI_VWSLL<"int_riscv_vwsll", "PseudoVWSLL",
                                    AllWidenableIntVectors>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL",
                           I64IntegerVectors>;
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH",
                           I64IntegerVectors>;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>;
  defm : VPatBinaryV_VV_VX_VROL<"int_riscv_vrol", "PseudoVROL", "PseudoVROR",
                                AllIntegerVectors>;
  defm : VPatBinaryV_VV_VX_VI_VROR<"int_riscv_vror", "PseudoVROR",
                                   AllIntegerVectors>;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH",
                               I32IntegerVectors>;
  defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL",
                                 I32IntegerVectors>;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvkned] in {
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF",
                                   I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM",
                                   I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF",
                                   I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM",
                                   I32IntegerVectors>;
  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vaeskf1", "PseudoVAESKF1",
                                 I32IntegerVectors>;
  defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2",
                               I32IntegerVectors>;
  defm : VPatUnaryV_S_NoMaskVectorCrypto<"int_riscv_vaesz", "PseudoVAESZ",
                                         I32IntegerVectors>;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvknha] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH",
                               I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL",
                               I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS",
                               I32IntegerVectors>;
} // Predicates = [HasStdExtZvknha]

let Predicates = [HasStdExtZvknhb] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH",
                               I32I64IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL",
                               I32I64IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS",
                               I32I64IntegerVectors>;
} // Predicates = [HasStdExtZvknhb]

let Predicates = [HasStdExtZvksed] in {
  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vsm4k", "PseudoVSM4K",
                                 I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R",
                                   I32IntegerVectors>;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh] in {
  defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C",
                               I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMaskTU<"int_riscv_vsm3me", "PseudoVSM3ME",
                                 I32IntegerVectors>;
} // Predicates = [HasStdExtZvksh]