//===-- RISCVInstrInfoZvk.td - RISC-V 'Zvk' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'Zvk',
// Vector Cryptography Instructions extension, version Release 1.0.0.
//
// NOTE(review): this file was recovered from a copy whose template parameter
// lists ("<...>" spans) had been stripped.  All parameter lists below were
// reconstructed from the intact call sites in this file; verify against
// upstream llvm-project before relying on exact parameter names/defaults.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

// Target-constant flavor of uimm5, used where the immediate must stay a
// TargetConstant (no materialization) in intrinsic patterns.
def tuimm5 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
  let ParserMatchClass = UImmAsmOperand<5>;
  let EncoderMethod = "getUImmOpValue";
  let DecoderMethod = "decodeUImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t UImm;
    if (MCOp.evaluateAsConstantImm(UImm))
      return isUInt<5>(UImm);
    return MCOp.isBareSymbolRef();
  }];
}

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// vclmul/vclmulh have .vv and .vx forms but no .vi form.
multiclass VCLMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
          Sched<[WriteVIALUV_WorstCase, ReadVIALUV_WorstCase,
                 ReadVIALUV_WorstCase, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
          Sched<[WriteVIALUX_WorstCase, ReadVIALUV_WorstCase,
                 ReadVIALUX_WorstCase, ReadVMask]>;
}

// vror.vi has a 6-bit immediate: bit imm{5} is stored in funct6 bit 26, the
// low five bits in the usual rs1/imm field.
class RVInstIVI_VROR<bits<6> funct6, dag outs, dag ins, string opcodestr,
                     string argstr>
    : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
  bits<5> vs2;
  bits<6> imm;
  bits<5> vd;
  bit vm;

  let Inst{31-27} = funct6{5-1};
  let Inst{26} = imm{5};
  let Inst{25} = vm;
  let Inst{24-20} = vs2;
  let Inst{19-15} = imm{4-0};
  let Inst{14-12} = OPIVI.Value;
  let Inst{11-7} = vd;
  let Inst{6-0} = OPC_OP_V.Value;

  let Uses = [VTYPE, VL];
  let RVVConstraint = VMConstraint;
}

multiclass VROR_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_V_X<opcodestr, funct6> {
  def I : RVInstIVI_VROR<funct6, (outs VR:$vd),
                         (ins VR:$vs2, uimm6:$imm, VMaskOp:$vm),
                         opcodestr # ".vi", "$vd, $vs2, $imm$vm">,
          Sched<[WriteVIALUI_WorstCase, ReadVIALUV_WorstCase, ReadVMask]>;
}

// op vd, vs2, vs1
class PALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : VALUVVNoVm<funct6, opv, opcodestr> {
  let Inst{6-0} = OPC_OP_P.Value;
}

// op vd, vs2, imm, vm
class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : VALUVINoVm<funct6, opcodestr, optype> {
  let Inst{6-0} = OPC_OP_P.Value;
  let Inst{14-12} = OPMVV.Value;
}

// op vd, vs2 (use vs1 as instruction encoding)
class PALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv,
                  string opcodestr>
    : VALUVs2NoVm<funct6, vs1, opv, opcodestr> {
  let Inst{6-0} = OPC_OP_P.Value;
}

// AES-style ops come in a .vv and a .vs variant that differ only in funct6;
// the vs1 field carries the sub-operation encoding.
multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1,
                       RISCVVFormat opv, string opcodestr> {
  def NAME # _VV : PALUVs2NoVm<funct6_vv, vs1, opv, opcodestr # ".vv">;
  def NAME # _VS : PALUVs2NoVm<funct6_vs, vs1, opv, opcodestr # ".vs">;
}

// vaeskf1.vi and vaeskf2.vi uses different opcode and format, we need
// to customize one for them.
class VAESKF_MV_I<bits<6> funct6, string opcodestr, Operand optype>
    : VALUVINoVm<funct6, opcodestr, optype> {
  let Inst{6-0} = OPC_OP_P.Value;
  let Inst{14-12} = OPMVV.Value;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZvbb] in {
  def VBREV_V : VALUVs2<0b010010, 0b01010, OPMVV, "vbrev.v">;
  def VCLZ_V : VALUVs2<0b010010, 0b01100, OPMVV, "vclz.v">;
  def VCPOP_V : VALUVs2<0b010010, 0b01110, OPMVV, "vcpop.v">;
  def VCTZ_V : VALUVs2<0b010010, 0b01101, OPMVV, "vctz.v">;
  let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in
  defm VWSLL_V : VSHT_IV_V_X_I<"vwsll", 0b110101>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm VCLMUL_V : VCLMUL_MV_V_X<"vclmul", 0b001100>;
  defm VCLMULH_V : VCLMUL_MV_V_X<"vclmulh", 0b001101>;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm VANDN_V : VALU_IV_V_X<"vandn", 0b000001>;
  def VBREV8_V : VALUVs2<0b010010, 0b01000, OPMVV, "vbrev8.v">;
  def VREV8_V : VALUVs2<0b010010, 0b01001, OPMVV, "vrev8.v">;
  defm VROL_V : VALU_IV_V_X<"vrol", 0b010101>;
  defm VROR_V : VROR_IV_V_X_I<"vror", 0b010100>;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg], RVVConstraint = NoConstraint in {
  def VGHSH_VV : PALUVVNoVm<0b101100, OPMVV, "vghsh.vv">;
  def VGMUL_VV : PALUVs2NoVm<0b101000, 0b10001, OPMVV, "vgmul.vv">;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = NoConstraint in {
  def VSHA2CH_VV : PALUVVNoVm<0b101110, OPMVV, "vsha2ch.vv">;
  def VSHA2CL_VV : PALUVVNoVm<0b101111, OPMVV, "vsha2cl.vv">;
  def VSHA2MS_VV : PALUVVNoVm<0b101101, OPMVV, "vsha2ms.vv">;
} // Predicates = [HasStdExtZvknhaOrZvknhb]

let Predicates = [HasStdExtZvkned], RVVConstraint = NoConstraint in {
  defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">;
  defm VAESDM : VAES_MV_V_S<0b101000, 0b101001, 0b00000, OPMVV, "vaesdm">;
  defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">;
  defm VAESEM : VAES_MV_V_S<0b101000, 0b101001, 0b00010, OPMVV, "vaesem">;
  def VAESKF1_VI : VAESKF_MV_I<0b100010, "vaeskf1.vi", uimm5>;
  def VAESKF2_VI : VAESKF_MV_I<0b101010, "vaeskf2.vi", uimm5>;
  def VAESZ_VS : PALUVs2NoVm<0b101001, 0b00111, OPMVV, "vaesz.vs">;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvksed], RVVConstraint = NoConstraint in {
  def VSM4K_VI : PALUVINoVm<0b100001, "vsm4k.vi", uimm5>;
  defm VSM4R : VAES_MV_V_S<0b101000, 0b101001, 0b10000, OPMVV, "vsm4r">;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh], RVVConstraint = NoConstraint in {
  def VSM3C_VI : PALUVINoVm<0b101011, "vsm3c.vi", uimm5>;
  def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">;
} // Predicates = [HasStdExtZvksh]

//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//

defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32));
defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors,
                                      !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)));

// For a .vs destination LMUL, the legal SEW=32 vs2 types (vs2 register group
// must not exceed the sizes below).
class ZvkI32IntegerVectors<string vd_lmul> {
  list<VTypeInfo> vs2_types =
      !cond(!eq(vd_lmul, "M8") : !filter(vti, I32IntegerVectors,
                                         !le(vti.LMul.octuple, 32)),
            !eq(vd_lmul, "M4") : !filter(vti, I32IntegerVectors,
                                         !le(vti.LMul.octuple, 32)),
            !eq(vd_lmul, "M2") : !filter(vti, I32IntegerVectors,
                                         !le(vti.LMul.octuple, 16)),
            !eq(vd_lmul, "M1") : !filter(vti, I32IntegerVectors,
                                         !le(vti.LMul.octuple, 8)),
            !eq(vd_lmul, "MF2") : !filter(vti, I32IntegerVectors,
                                          !le(vti.LMul.octuple, 4)),
            !eq(vd_lmul, "MF4") : !filter(vti, I32IntegerVectors,
                                          !le(vti.LMul.octuple, 2)),
            !eq(vd_lmul, "MF8") : !filter(vti, I32IntegerVectors,
                                          !le(vti.LMul.octuple, 1)));
}

// For a .vs destination LMUL, the list of legal vs2 LMULs.
class ZvkMxSet<string vd_lmul> {
  list<LMULInfo> vs2_lmuls =
      !cond(!eq(vd_lmul, "M8") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
            !eq(vd_lmul, "M4") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
            !eq(vd_lmul, "M2") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2],
            !eq(vd_lmul, "M1") : [V_MF8, V_MF4, V_MF2, V_M1],
            !eq(vd_lmul, "MF2") : [V_MF8, V_MF4, V_MF2],
            !eq(vd_lmul, "MF4") : [V_MF8, V_MF4],
            !eq(vd_lmul, "MF8") : [V_MF8]);
}

// Destructive unary pseudo: vd is both source ($merge) and destination;
// there is no mask variant for the crypto ops.
class VPseudoUnaryNoMask_Zvk<DAGOperand RetClass, DAGOperand OpClass,
                             string Constraint = ""> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew,
                  ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Destructive binary pseudo (vd = op(vd, vs2, vs1)), unmasked only.
class VPseudoBinaryNoMask_Zvk<DAGOperand RetClass, DAGOperand Op1Class,
                              DAGOperand Op2Class, string Constraint = ""> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
                  ixlenimm:$sew, ixlenimm:$policy), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasVecPolicyOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

multiclass VPseudoBinaryNoMask_Zvk<VReg RetClass, VReg Op1Class,
                                   DAGOperand Op2Class, LMULInfo MInfo,
                                   string Constraint = ""> {
  let VLMul = MInfo.value in
  def "_" # MInfo.MX : VPseudoBinaryNoMask_Zvk<RetClass, Op1Class, Op2Class,
                                               Constraint>;
}

multiclass VPseudoUnaryV_V_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
  let VLMul = m.value in {
    def "_VV_" # m.MX : VPseudoUnaryNoMask_Zvk<m.vrclass, m.vrclass,
                                               Constraint>;
  }
}

// .vs form: one pseudo per legal (vd LMUL, vs2 LMUL) pair.
multiclass VPseudoUnaryV_S_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
  let VLMul = m.value in
    foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in
      def "_VS_" # m.MX # "_" # vs2_lmul.MX
          : VPseudoUnaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass, Constraint>;
}

multiclass VPseudoVALU_V_NoMask_Zvk<string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defm "" : VPseudoUnaryV_V_NoMask_Zvk<m, Constraint>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_S_NoMask_Zvk<string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defm "" : VPseudoUnaryV_S_NoMask_Zvk<m, Constraint>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_V_S_NoMask_Zvk<string Constraint = ""> {
  defm "" : VPseudoVALU_V_NoMask_Zvk<Constraint>;
  defm "" : VPseudoVALU_S_NoMask_Zvk<Constraint>;
}

multiclass VPseudoVALU_VV_NoMask_Zvk<string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defm _VV : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m,
                                       Constraint>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_VI_NoMask_Zvk<Operand ImmType = uimm5,
                                     string Constraint = ""> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defm _VI : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, ImmType, m,
                                       Constraint>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

// "TU" variants: non-destructive, regular ternary-operand pseudos.
multiclass VPseudoVALU_VI_NoMaskTU_Zvk<Operand ImmType = uimm5> {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defm _VI : VPseudoBinaryNoMask<m.vrclass, m.vrclass, ImmType, m>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVALU_VV_NoMaskTU_Zvk {
  foreach m = MxListVF4 in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defm _VV : VPseudoBinaryNoMask<m.vrclass, m.vrclass, m.vrclass, m>,
               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

multiclass VPseudoVCLMUL_VV_VX {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    // NOTE(review): the string below reads "WriteVIALUV_" (not ..X_) in the
    // original; preserved as-is — looks like an upstream typo, confirm.
    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);
    defm "" : VPseudoBinaryV_VV<m>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
    defm "" : VPseudoBinaryV_VX<m>,
              Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
  }
}

multiclass VPseudoUnaryV_V<LMULInfo m> {
  let VLMul = m.value in {
    defvar suffix = "_V_" # m.MX;
    def suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
    def suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
                           RISCVMaskedPseudo<MaskIdx=2>;
  }
}

multiclass VPseudoVALU_V {
  foreach m = MxList in {
    defvar mx = m.MX;
    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
    defm "" : VPseudoUnaryV_V<m>,
              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
  }
}

let Predicates = [HasStdExtZvbb] in {
  defm PseudoVBREV : VPseudoVALU_V;
  defm PseudoVCLZ : VPseudoVALU_V;
  defm PseudoVCTZ : VPseudoVALU_V;
  defm PseudoVCPOP : VPseudoVALU_V;
  defm PseudoVWSLL : VPseudoVWALU_VV_VX_VI<uimm5>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm PseudoVCLMUL : VPseudoVCLMUL_VV_VX;
  defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm PseudoVANDN : VPseudoVALU_VV_VX;
  defm PseudoVBREV8 : VPseudoVALU_V;
  defm PseudoVREV8 : VPseudoVALU_V;
  defm PseudoVROL : VPseudoVALU_VV_VX;
  defm PseudoVROR : VPseudoVALU_VV_VX_VI<uimm6>;
} // Predicates = [HasStdExtZvkb]

let Predicates = [HasStdExtZvkg] in {
  defm PseudoVGHSH : VPseudoVALU_VV_NoMask_Zvk;
  defm PseudoVGMUL : VPseudoVALU_V_NoMask_Zvk;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvkned] in {
  defm PseudoVAESDF : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESDM : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESEF : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESEM : VPseudoVALU_V_S_NoMask_Zvk;
  defm PseudoVAESKF1 : VPseudoVALU_VI_NoMaskTU_Zvk;
  defm PseudoVAESKF2 : VPseudoVALU_VI_NoMask_Zvk;
  defm PseudoVAESZ : VPseudoVALU_S_NoMask_Zvk;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvknhaOrZvknhb] in {
  defm PseudoVSHA2CH : VPseudoVALU_VV_NoMask_Zvk;
  defm PseudoVSHA2CL : VPseudoVALU_VV_NoMask_Zvk;
  defm PseudoVSHA2MS : VPseudoVALU_VV_NoMask_Zvk;
} // Predicates = [HasStdExtZvknhaOrZvknhb]

let Predicates = [HasStdExtZvksed] in {
  defm PseudoVSM4K : VPseudoVALU_VI_NoMaskTU_Zvk;
  defm PseudoVSM4R : VPseudoVALU_V_S_NoMask_Zvk;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh] in {
  defm PseudoVSM3C : VPseudoVALU_VI_NoMask_Zvk;
  defm PseudoVSM3ME : VPseudoVALU_VV_NoMaskTU_Zvk;
} // Predicates = [HasStdExtZvksh]

//===----------------------------------------------------------------------===//
// SDNode patterns
//===----------------------------------------------------------------------===//

multiclass VPatUnarySDNode_V<SDPatternOperator op, string instruction_name,
                             Predicate predicate = HasStdExtZvbb> {
  foreach vti = AllIntegerVectors in {
    let Predicates = !listconcat([predicate],
                                 GetVTypePredicates<vti>.Predicates) in {
      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1))),
                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Helpers for detecting splats since we preprocess splat_vector to vmv.v.x
// This should match the logic in RISCVDAGToDAGISel::selectVSplat
def riscv_splat_vector : PatFrag<(ops node:$rs1),
                                 (riscv_vmv_v_x_vl undef, node:$rs1,
                                                   srcvalue)>;

def riscv_vnot : PatFrag<(ops node:$rs1), (xor node:$rs1,
                                               (riscv_splat_vector -1))>;

foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (and (riscv_vnot vti.RegClass:$rs1),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (and (riscv_splat_vector
                                  (not vti.ScalarRegClass:$rs1)),
                               vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// NOTE(review): the operator/instruction arguments of the next six defms were
// lost in the corrupted copy; reconstructed from the pseudos defined above.
defm : VPatUnarySDNode_V<bitreverse, "PseudoVBREV">;
defm : VPatUnarySDNode_V<bswap, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">;
defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">;
defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">;

defm : VPatBinarySDNode_VV_VX<rotl, "PseudoVROL">;

// Invert the immediate and mask it to SEW for readability.
def InvRot8Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x7 & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot16Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0xf & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot32Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x1f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;
def InvRot64Imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(0x3f & (64 - N->getZExtValue()), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (rotl vti.RegClass:$rs2,
                                (vti.Vector (SplatPat_uimm6 uimm6:$rs1)))),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}
defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>;

foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, GPR:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, uimm5:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

//===----------------------------------------------------------------------===//
// VL patterns
//===----------------------------------------------------------------------===//

multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
                         Predicate predicate = HasStdExtZvbb> {
  foreach vti = AllIntegerVectors in {
    let Predicates = !listconcat([predicate],
                                 GetVTypePredicates<vti>.Predicates) in {
      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
                                (vti.Vector vti.RegClass:$merge),
                                (vti.Mask V0),
                                VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge,
                   vti.RegClass:$rs1,
                   (vti.Mask V0),
                   GPR:$vl,
                   vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
    }
  }
}

foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(vti.Vector (riscv_and_vl
                             (riscv_xor_vl (vti.Vector vti.RegClass:$rs1),
                                           (riscv_splat_vector -1),
                                           (vti.Vector vti.RegClass:$merge),
                                           (vti.Mask V0),
                                           VLOpFrag),
                             (vti.Vector vti.RegClass:$rs2),
                             (vti.Vector vti.RegClass:$merge),
                             (vti.Mask V0),
                             VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 vti.RegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;

    def : Pat<(vti.Vector (riscv_and_vl
                             (riscv_splat_vector
                                (not vti.ScalarRegClass:$rs1)),
                             (vti.Vector vti.RegClass:$rs2),
                             (vti.Vector vti.RegClass:$merge),
                             (vti.Mask V0),
                             VLOpFrag)),
              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 vti.ScalarRegClass:$rs1,
                 (vti.Mask V0),
                 GPR:$vl,
                 vti.Log2SEW,
                 TAIL_AGNOSTIC)>;
  }
}

// NOTE(review): arguments of the next six defms reconstructed (see above).
defm : VPatUnaryVL_V<riscv_bitreverse_vl, "PseudoVBREV">;
defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8", HasStdExtZvkb>;
defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">;
defm : VPatUnaryVL_V<riscv_cttz_vl, "PseudoVCTZ">;
defm : VPatUnaryVL_V<riscv_ctpop_vl, "PseudoVCPOP">;

defm : VPatBinaryVL_VV_VX<riscv_rotl_vl, "PseudoVROL">;

// Although there is no vrol.vi, an immediate rotate left can be achieved by
// negating the immediate in vror.vi
foreach vti = AllIntegerVectors in {
  let Predicates = !listconcat([HasStdExtZvkb],
                               GetVTypePredicates<vti>.Predicates) in {
    def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
                             (vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
                             (vti.Vector vti.RegClass:$merge),
                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge,
                 vti.RegClass:$rs2,
                 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
defm : VPatBinaryVL_VV_VX_VI<riscv_rotr_vl, "PseudoVROR", uimm6>;

foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat([HasStdExtZvbb],
                               GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(riscv_shl_vl (wti.Vector (zext_oneuse
                                           (vti.Vector vti.RegClass:$rs2))),
                            (wti.Vector (ext_oneuse
                                           (vti.Vector vti.RegClass:$rs1))),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl (wti.Vector (zext_oneuse
                                           (vti.Vector vti.RegClass:$rs2))),
                            (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_shl_vl (wti.Vector (zext_oneuse
                                           (vti.Vector vti.RegClass:$rs2))),
                            (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                            (wti.Vector wti.RegClass:$merge),
                            (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl (vti.Vector vti.RegClass:$rs2),
                              (vti.Vector vti.RegClass:$rs1),
                              (wti.Vector wti.RegClass:$merge),
                              (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl (vti.Vector vti.RegClass:$rs2),
                              (vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
                              (wti.Vector wti.RegClass:$merge),
                              (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(riscv_vwsll_vl (vti.Vector vti.RegClass:$rs2),
                              (vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
                              (wti.Vector wti.RegClass:$merge),
                              (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Codegen patterns
//===----------------------------------------------------------------------===//

class VPatUnaryNoMask_Zvk<string intrinsic_name,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op2_type,
                          int sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                      (result_type result_reg_class:$merge),
                      (op2_type op2_reg_class:$rs2),
                      VLOpFrag, (XLenVT timm:$policy))),
      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
         (result_type result_reg_class:$merge),
         (op2_type op2_reg_class:$rs2),
         GPR:$vl, sew, (XLenVT timm:$policy))>;

class VPatUnaryNoMask_VS_Zvk<string intrinsic_name,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             LMULInfo vs2_lmul,
                             VReg result_reg_class,
                             VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                      (result_type result_reg_class:$merge),
                      (op2_type op2_reg_class:$rs2),
                      VLOpFrag, (XLenVT timm:$policy))),
      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX)
         (result_type result_reg_class:$merge),
         (op2_type op2_reg_class:$rs2),
         GPR:$vl, sew, (XLenVT timm:$policy))>;

multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatUnaryNoMask_Zvk<intrinsic, instruction, "VV",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.RegClass, vti.RegClass>;
}

multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
                                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in
      def : VPatUnaryNoMask_VS_Zvk<intrinsic, instruction, "VS",
                                   vti.Vector, vti_vs2.Vector, vti.Log2SEW,
                                   vti.LMul, vti_vs2.LMul, vti.RegClass,
                                   vti_vs2.RegClass>;
}

multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist> {
  defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>;
  defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>;
}

multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV",
                                      vti.Vector, vti.Vector, vti.Vector,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist,
                                 Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI",
                                      vti.Vector, vti.Vector, XLenVT,
                                      vti.Log2SEW, vti.LMul, vti.RegClass,
                                      vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist,
                                   Operand imm_type = tuimm5> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, XLenVT, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, imm_type>;
}

multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction,
                                   list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                             vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW,
                             vti.RegClass, vti.RegClass, vti.RegClass>;
}

multiclass VPatBinaryV_VX_VROTATE<string intrinsic, string instruction,
                                  list<VTypeInfo> vtilist,
                                  bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    let Predicates = GetVTypePredicates<vti>.Predicates in
      defm : VPatBinary<intrinsic,
                        !if(isSEWAware,
                            instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
                            instruction#"_"#kind#"_"#vti.LMul.MX),
                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
                        vti.Log2SEW, vti.RegClass,
                        vti.RegClass, vti.ScalarRegClass>;
  }
}

// vrol.vi does not exist; select a vror.vi with the SEW-reduced negated
// immediate instead.
multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist, bit isSEWAware = 0> {
  foreach vti = vtilist in {
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(
                      !if(isSEWAware,
                          instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
                          instruction#"_VI_"#vti.LMul.MX));
    let Predicates = GetVTypePredicates<vti>.Predicates in
      def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$merge),
                                  (vti.Vector vti.RegClass:$rs2),
                                  (XLenVT uimm6:$rs1),
                                  VLOpFrag)),
                (Pseudo (vti.Vector vti.RegClass:$merge),
                        (vti.Vector vti.RegClass:$rs2),
                        (InvRot64Imm uimm6:$rs1),
                        GPR:$vl, vti.Log2SEW, TU_MU)>;

    defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask");
    defvar PseudoMask = !cast<Instruction>(
                          !if(isSEWAware,
                              instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
                              instruction#"_VI_"#vti.LMul.MX#"_MASK"));
    let Predicates = GetVTypePredicates<vti>.Predicates in
      def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$merge),
                                      (vti.Vector vti.RegClass:$rs2),
                                      (XLenVT uimm6:$rs1),
                                      (vti.Mask V0),
                                      VLOpFrag, (XLenVT timm:$policy))),
                (PseudoMask (vti.Vector vti.RegClass:$merge),
                            (vti.Vector vti.RegClass:$rs2),
                            (InvRot64Imm uimm6:$rs1),
                            (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                            (XLenVT timm:$policy))>;
  }
}

multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
                                  string instruction2,
                                  list<VTypeInfo> vtilist>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;

multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist,
                                     Operand ImmType = uimm6>
    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;

multiclass VPatBinaryW_VI_VWSLL<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                      Vti.Log2SEW, Wti.RegClass,
                      Vti.RegClass, uimm5>;
  }
}

multiclass VPatBinaryW_VX_VWSLL<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                 GetVTypePredicates<Wti>.Predicates) in
      defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                        Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
                        Vti.Log2SEW, Wti.RegClass,
                        Vti.RegClass, Vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
                                      list<VTypeInfoToWide> vtilist>
    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
      VPatBinaryW_VX_VWSLL<intrinsic, instruction, vtilist>,
      VPatBinaryW_VI_VWSLL<intrinsic, instruction, vtilist>;

let Predicates = [HasStdExtZvbb] in {
  defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>;
  defm : VPatBinaryW_VV_VX_VI_VWSLL<"int_riscv_vwsll", "PseudoVWSLL",
                                    AllWidenableIntVectors>;
} // Predicates = [HasStdExtZvbb]

let Predicates = [HasStdExtZvbc] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL",
                           I64IntegerVectors>;
  defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH",
                           I64IntegerVectors>;
} // Predicates = [HasStdExtZvbc]

let Predicates = [HasStdExtZvkb] in {
  defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN",
                           AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>;
  defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>;
  defm : VPatBinaryV_VV_VX_VROL<"int_riscv_vrol", "PseudoVROL", "PseudoVROR",
                                AllIntegerVectors>;
  defm : VPatBinaryV_VV_VX_VI_VROR<"int_riscv_vror", "PseudoVROR",
                                   AllIntegerVectors>;
} // Predicates = [HasStdExtZvkb]
let Predicates = [HasStdExtZvkg] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH",
                               I32IntegerVectors>;
  defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL",
                                 I32IntegerVectors>;
} // Predicates = [HasStdExtZvkg]

let Predicates = [HasStdExtZvkned] in {
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF",
                                   I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM",
                                   I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF",
                                   I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM",
                                   I32IntegerVectors>;
  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vaeskf1", "PseudoVAESKF1",
                                 I32IntegerVectors>;
  defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2",
                               I32IntegerVectors>;
  defm : VPatUnaryV_S_NoMaskVectorCrypto<"int_riscv_vaesz", "PseudoVAESZ",
                                         I32IntegerVectors>;
} // Predicates = [HasStdExtZvkned]

let Predicates = [HasStdExtZvknha] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH",
                               I32IntegerVectors>;
  // Bug fix: int_riscv_vsha2cl previously selected PseudoVSHA2CH, so the
  // vsha2cl intrinsic emitted the vsha2ch.vv instruction.
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL",
                               I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS",
                               I32IntegerVectors>;
} // Predicates = [HasStdExtZvknha]

let Predicates = [HasStdExtZvknhb] in {
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH",
                               I32I64IntegerVectors>;
  // Bug fix: same vsha2cl -> PseudoVSHA2CH mix-up as in the Zvknha block.
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL",
                               I32I64IntegerVectors>;
  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS",
                               I32I64IntegerVectors>;
} // Predicates = [HasStdExtZvknhb]

let Predicates = [HasStdExtZvksed] in {
  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vsm4k", "PseudoVSM4K",
                                 I32IntegerVectors>;
  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R",
                                   I32IntegerVectors>;
} // Predicates = [HasStdExtZvksed]

let Predicates = [HasStdExtZvksh] in {
  defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C",
                               I32IntegerVectors>;
  defm : VPatBinaryV_VV_NoMaskTU<"int_riscv_vsm3me", "PseudoVSM3ME",
                                 I32IntegerVectors>;
} // Predicates = [HasStdExtZvksh]