//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and VL patterns to
/// support code generation for the standard 'V' (Vector) extension, version
/// 0.10. This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the VL patterns.
//===----------------------------------------------------------------------===//

def SDT_RISCVVLE_VL : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
                                           SDTCisVT<2, XLenVT>]>;
def SDT_RISCVVSE_VL : SDTypeProfile<0, 3, [SDTCisVec<0>, SDTCisPtrTy<1>,
                                           SDTCisVT<2, XLenVT>]>;

def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 4, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVec<0>, SDTCisInt<0>,
  SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT>
]>;

def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [
  SDTCisSameAs<0, 1>, SDTCisVec<0>, SDTCisFP<0>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<0, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 4, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVec<0>, SDTCisFP<0>,
  SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT>
]>;
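// Note that the *BinOp_VL/*UnOp_VL profiles above all follow the same operand
// convention: the vector data operands come first, followed by an i1 mask
// vector with the same element count as the result, and finally the AVL
// operand of type XLenVT.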
def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL",
    SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<0>,
                         SDTCisVT<1, XLenVT>, SDTCisVT<2, XLenVT>]>>;
def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL",
    SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisFP<0>,
                         SDTCisEltOfVec<1, 0>, SDTCisVT<2, XLenVT>]>>;
def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL",
    SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>,
                         SDTCisVT<2, XLenVT>, SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL",
    SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisFP<0>,
                         SDTCisEltOfVec<2, 0>, SDTCisVT<3, XLenVT>]>>;

def riscv_vle_vl : SDNode<"RISCVISD::VLE_VL", SDT_RISCVVLE_VL,
                          [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def riscv_vse_vl : SDNode<"RISCVISD::VSE_VL", SDT_RISCVVSE_VL,
                          [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def riscv_add_vl   : SDNode<"RISCVISD::ADD_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sub_vl   : SDNode<"RISCVISD::SUB_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_mul_vl   : SDNode<"RISCVISD::MUL_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_and_vl   : SDNode<"RISCVISD::AND_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_or_vl    : SDNode<"RISCVISD::OR_VL",    SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_xor_vl   : SDNode<"RISCVISD::XOR_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sdiv_vl  : SDNode<"RISCVISD::SDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_srem_vl  : SDNode<"RISCVISD::SREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_udiv_vl  : SDNode<"RISCVISD::UDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_urem_vl  : SDNode<"RISCVISD::UREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_shl_vl   : SDNode<"RISCVISD::SHL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_sra_vl   : SDNode<"RISCVISD::SRA_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_srl_vl   : SDNode<"RISCVISD::SRL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_smin_vl  : SDNode<"RISCVISD::SMIN_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_smax_vl  : SDNode<"RISCVISD::SMAX_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_umin_vl  : SDNode<"RISCVISD::UMIN_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_umax_vl  : SDNode<"RISCVISD::UMAX_VL",  SDT_RISCVIntBinOp_VL>;

def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;

def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fminnum_vl : SDNode<"RISCVISD::FMINNUM_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fmaxnum_vl : SDNode<"RISCVISD::FMAXNUM_VL", SDT_RISCVFPBinOp_VL>;

def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
  SDTCisVec<0>, SDTCisFP<0>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>
]>;
def riscv_fma_vl : SDNode<"RISCVISD::FMA_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;

def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;

def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

def riscv_fp_to_sint_vl : SDNode<"RISCVISD::FP_TO_SINT_VL", SDT_RISCVFP2IOp_VL>;
def riscv_fp_to_uint_vl : SDNode<"RISCVISD::FP_TO_UINT_VL", SDT_RISCVFP2IOp_VL>;
def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;

def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL",
    SDTypeProfile<1, 5, [SDTCVecEltisVT<0, i1>, SDTCisVec<1>,
                         SDTCisSameNumEltsAs<0, 1>, SDTCisSameAs<1, 2>,
                         SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>,
                         SDTCisVT<5, XLenVT>]>>;

def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
    SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                         SDTCisVT<2, XLenVT>,
                         SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>,
                         SDTCisVT<4, XLenVT>]>>;
def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL",
    SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                         SDTCisInt<2>, SDTCisSameNumEltsAs<0, 2>,
                         SDTCisSameSizeAs<0, 2>,
                         SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>,
                         SDTCisVT<4, XLenVT>]>>;
def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
    SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                         SDTCisInt<2>, SDTCVecEltisVT<2, i16>,
                         SDTCisSameNumEltsAs<0, 2>,
                         SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>,
                         SDTCisVT<4, XLenVT>]>>;

def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL",
    SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisVec<1>,
                         SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
                         SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>,
                         SDTCisVT<4, XLenVT>]>>;

def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                 SDTCisSameAs<0, 2>,
                                                 SDTCVecEltisVT<0, i1>,
                                                 SDTCisVT<3, XLenVT>]>;
def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmor_vl  : SDNode<"RISCVISD::VMOR_VL",  SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;

def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;

def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
                             (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;

def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
    SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                         SDTCisVec<1>, SDTCisInt<1>,
                         SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>,
                         SDTCisVT<3, XLenVT>]>>;

def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [
  SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, SDTCisSameNumEltsAs<1, 2>,
  SDTCVecEltisVT<2, i1>, SDTCisVT<3, XLenVT>
]>;
def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>;
def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;

def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
    SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisVec<1>,
                         SDTCisSameNumEltsAs<0, 2>, SDTCVecEltisVT<2, i1>,
                         SDTCisVT<3, XLenVT>]>>;

def SDT_RISCVVWMUL_VL : SDTypeProfile<1, 4, [
  SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, SDTCisSameAs<1, 2>,
  SDTCisSameNumEltsAs<1, 3>, SDTCVecEltisVT<3, i1>, SDTCisVT<4, XLenVT>
]>;
def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;
def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;

def SDTRVVVecReduce : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>
]>;
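// The _oneuse PatFrags below only match a multiply whose result has a single
// use. They are used by the integer and widening multiply-add patterns so a
// multiply is only folded into vmadd/vnmsub/vwmacc(u) when it does not also
// need to be materialized separately.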
def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                  (riscv_mul_vl node:$A, node:$B, node:$C,
                                                node:$D), [{
  return N->hasOneUse();
}]>;

def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                    (riscv_vwmul_vl node:$A, node:$B, node:$C,
                                                    node:$D), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                     (riscv_vwmulu_vl node:$A, node:$B, node:$C,
                                                      node:$D), [{
  return N->hasOneUse();
}]>;

foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;

// Ignore the vl operand.
def SplatFPOp : PatFrag<(ops node:$op),
                        (riscv_vfmv_v_f_vl node:$op, srcvalue)>;

def sew8simm5  : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>",  []>;
def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;
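// The VPatBinaryVL_VV and VPatBinaryVL_XI multiclasses below each emit two
// patterns per vector type: an unmasked form whose mask operand is the
// all-ones true_mask, selecting the plain pseudo, and a masked form whose
// mask lives in V0, selecting the corresponding "_MASK" pseudo with an
// IMPLICIT_DEF merge operand and a tail-agnostic policy.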
multiclass VPatBinaryVL_VV<SDNode vop, string instruction_name,
                           ValueType result_type, ValueType op_type,
                           ValueType mask_type, int sew, LMULInfo vlmul,
                           VReg op_reg_class> {
  def : Pat<(result_type (vop (op_type op_reg_class:$rs1),
                              (op_type op_reg_class:$rs2),
                              (mask_type true_mask),
                              VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
                 op_reg_class:$rs1, op_reg_class:$rs2, GPR:$vl, sew)>;
  def : Pat<(result_type (vop (op_type op_reg_class:$rs1),
                              (op_type op_reg_class:$rs2),
                              (mask_type V0),
                              VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_MASK")
                 (result_type (IMPLICIT_DEF)),
                 op_reg_class:$rs1, op_reg_class:$rs2,
                 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
}

multiclass VPatBinaryVL_XI<SDNode vop, string instruction_name, string suffix,
                           ValueType result_type, ValueType vop_type,
                           ValueType mask_type, int sew, LMULInfo vlmul,
                           VReg vop_reg_class, ComplexPattern SplatPatKind,
                           DAGOperand xop_kind> {
  def : Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                              (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                              (mask_type true_mask),
                              VLOpFrag)),
            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
                 vop_reg_class:$rs1, xop_kind:$rs2, GPR:$vl, sew)>;
  def : Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                              (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                              (mask_type V0),
                              VLOpFrag)),
            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK")
                 (result_type (IMPLICIT_DEF)),
                 vop_reg_class:$rs1, xop_kind:$rs2,
                 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
}

multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defm : VPatBinaryVL_VV<vop, instruction_name,
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass>;
    defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass, SplatPat, GPR>;
  }
}

multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
                                 Operand ImmType = simm5>
    : VPatBinaryVL_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    defm : VPatBinaryVL_XI<vop, instruction_name, "VI",
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass,
                           !cast<ComplexPattern>(SplatPat#_#ImmType),
                           ImmType>;
  }
}

multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    defm : VPatBinaryVL_VV<vop, instruction_name,
                           wti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass>;
    defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
                           wti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass, SplatPat, GPR>;
  }
}

class VPatBinaryVL_VF<SDNode vop, string instruction_name,
                      ValueType result_type, ValueType vop_type,
                      ValueType mask_type, int sew, LMULInfo vlmul,
                      VReg vop_reg_class, RegisterClass scalar_reg_class> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp scalar_reg_class:$rs2)),
                          (mask_type true_mask),
                          VLOpFrag)),
        (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
             vop_reg_class:$rs1, scalar_reg_class:$rs2, GPR:$vl, sew)>;

multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defm : VPatBinaryVL_VV<vop, instruction_name,
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass>;
    def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                          vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                          vti.LMul, vti.RegClass, vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in
    def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
                                fvti.RegClass:$rs1,
                                (fvti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   GPR:$vl, fvti.Log2SEW)>;
}

multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
                                 CondCode cc> {
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      vti.RegClass:$rs2, cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
                 vti.Log2SEW)>;
}
// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> :
    VPatIntegerSetCCVL_VV<vti, instruction_name, cc> {
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
                                      vti.RegClass:$rs1, invcc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
                 vti.Log2SEW)>;
}

multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> {
  defvar instruction = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX);
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (SplatPat (XLenVT GPR:$rs2)), cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
                                      (vti.Vector vti.RegClass:$rs1), invcc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}

multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> {
  defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (SplatPat_simm5 simm5:$rs2), cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
                                      (vti.Vector vti.RegClass:$rs1), invcc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}

multiclass VPatIntegerSetCCVL_VIPlus1<VTypeInfo vti, string instruction_name,
                                      CondCode cc, ComplexPattern splatpat_kind> {
  defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (splatpat_kind simm5:$rs2), cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                         GPR:$vl, vti.Log2SEW)>;
}

multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
                                  string inst_name,
                                  string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
                                         fvti.RegClass:$rs2,
                                         cc,
                                         (fvti.Mask true_mask),
                                         VLOpFrag)),
              (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.RegClass:$rs2, GPR:$vl, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
                                         (SplatFPOp fvti.ScalarRegClass:$rs2),
                                         cc,
                                         (fvti.Mask true_mask),
                                         VLOpFrag)),
              (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   GPR:$vl, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (riscv_setcc_vl (SplatFPOp fvti.ScalarRegClass:$rs2),
                                         (fvti.Vector fvti.RegClass:$rs1),
                                         cc,
                                         (fvti.Mask true_mask),
                                         VLOpFrag)),
              (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   GPR:$vl, fvti.Log2SEW)>;
  }
}
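// The extension and floating-point/integer conversion helpers below only
// match the unmasked (true_mask) form of their nodes; the reduction helper
// that follows them also handles the masked (V0) form.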
multiclass VPatExtendSDNode_V_VL<SDNode vop, string inst_name, string suffix,
                                 list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
                               true_mask, VLOpFrag)),
              (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                   fti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
  }
}

multiclass VPatConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                   fvti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
  }
}

multiclass VPatConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                                (ivti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                   ivti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }
}

multiclass VPatWConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }
}

multiclass VPatWConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                                (ivti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                   ivti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
  }
}

multiclass VPatNConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask true_mask),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                   fwti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
  }
}

multiclass VPatNConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
                                (iwti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                   iwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }
}

multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
  foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
    defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
                                 (vti.Mask true_mask),
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
                  (vti_m1.Vector VR:$merge),
                  (vti.Vector vti.RegClass:$rs1),
                  (vti_m1.Vector VR:$rs2),
                  GPR:$vl, vti.Log2SEW)>;

    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
                                 (vti.Mask V0), VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
                  (vti_m1.Vector VR:$merge),
                  (vti.Vector vti.RegClass:$rs1),
                  (vti_m1.Vector VR:$rs2),
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}
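// Note that in VPatReductionVL the scalar operand and the result use the M1
// type (vti_m1) computed from the element width, matching the VR scalar
// operands of the reduction pseudos regardless of the source operand's LMUL.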
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {

// 7.4. Vector Unit-Stride Instructions
foreach vti = AllVectors in {
  defvar load_instr = !cast<Instruction>("PseudoVLE"#vti.SEW#"_V_"#vti.LMul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#vti.SEW#"_V_"#vti.LMul.MX);
  // Load
  def : Pat<(vti.Vector (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
            (load_instr BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
  // Store
  def : Pat<(riscv_vse_vl (vti.Vector vti.RegClass:$rs2), BaseAddr:$rs1,
                          VLOpFrag),
            (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
}

foreach mti = AllMasks in {
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#mti.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#mti.BX);
  def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
            (load_instr BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(riscv_vse_vl (mti.Mask VR:$rs2), BaseAddr:$rs1,
                          VLOpFrag),
            (store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
}

// 12.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask V0),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask V0),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
// 12.3. Vector Integer Extension
defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF2",
                             AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF2",
                             AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF4",
                             AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF4",
                             AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF8",
                             AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF8",
                             AllFractionableVF8IntVectors>;

// 12.5. Vector Bitwise Logical Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">;
defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">;
defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">;

// 12.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
                          (riscv_vmv_v_x_vl 1, (XLenVT srcvalue)),
                          (vti.Mask true_mask),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
}

// 12.7. Vector Narrowing Integer Right Shift Instructions
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                               (vti.Mask true_mask),
                                               VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX)
                 wti.RegClass:$rs1, 0, GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_sra_vl wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRA_WX_"#vti.LMul.MX)
                 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_sra_vl wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRA_WI_"#vti.LMul.MX)
                 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_srl_vl wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX)
                 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_srl_vl wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX)
                 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}
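// A plain truncate of the wider type is matched above as vnsrl.wi with a
// shift amount of zero; when the truncated value is a splat-amount right
// shift of the wider type, the shift amount folds directly into
// vnsra.wx/.wi or vnsrl.wx/.wi instead.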
// 12.8. Vector Integer Comparison Instructions
foreach vti = AllIntegerVectors in {
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;

  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
  // There is no VMSGE(U)_VX instruction

  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSLE", SETLT,
                                    SplatPat_simm5_plus1>;
  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSLEU", SETULT,
                                    SplatPat_simm5_plus1_nonzero>;
  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSGT", SETGE,
                                    SplatPat_simm5_plus1>;
  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSGTU", SETUGE,
                                    SplatPat_simm5_plus1_nonzero>;
} // foreach vti = AllIntegerVectors
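// The VIPlus1 instantiations above handle the conditions that have no direct
// immediate form: DecImm emits the splatted constant decremented by one, so
// x < C is selected as vmsle.vi with C-1 and x >= C as vmsgt.vi with C-1
// (likewise for the unsigned variants).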
// 12.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 12.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH">;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU">;

// 12.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU">;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">;

// 12.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;

// 12.13. Vector Single-Width Integer Multiply-Add Instructions
foreach vti = AllIntegerVectors in {
  // NOTE: We choose VMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = vti.LMul.MX;
  def : Pat<(vti.Vector
             (riscv_add_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse vti.RegClass:$rs1,
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVMADD_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector
             (riscv_sub_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse vti.RegClass:$rs1,
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVNMSUB_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
  // commutable.
  def : Pat<(vti.Vector
             (riscv_add_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVMADD_VX_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector
             (riscv_sub_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask),
                                                VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVNMSUB_VX_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 12.14. Vector Widening Integer Multiply-Add Instructions
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmul_vl_oneuse vti.RegClass:$rs1,
                                                  (vti.Vector vti.RegClass:$rs2),
                                                  (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACC_VV_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmulu_vl_oneuse vti.RegClass:$rs1,
                                                   (vti.Vector vti.RegClass:$rs2),
                                                   (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCU_VV_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmul_vl_oneuse (SplatPat XLenVT:$rs1),
                                                  (vti.Vector vti.RegClass:$rs2),
                                                  (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACC_VX_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmulu_vl_oneuse (SplatPat XLenVT:$rs1),
                                                   (vti.Vector vti.RegClass:$rs2),
                                                   (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCU_VX_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
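// As with the single-width forms, the widening multiply-accumulate patterns
// above rely on the single-use multiply fragments, so the widening multiply
// is folded into vwmacc/vwmaccu only when its result is not needed elsewhere.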
// 12.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          vti.RegClass:$rs1,
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                 GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (SplatPat XLenVT:$rs1),
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (SplatPat_simm5 simm5:$rs1),
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}

// 12.16. Vector Integer Move Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
                 $rs2, GPR:$vl, vti.Log2SEW)>;
  defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl (ImmPat XLenVT:$imm5),
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
                 XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
}

// 13.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;

} // Predicates = [HasVInstructions]

// 15.1. Vector Single-Width Integer Reduction Instructions
let Predicates = [HasVInstructions] in {
defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", /*is_float*/0>;
} // Predicates = [HasVInstructions]

// 15.3. Vector Single-Width Floating-Point Reduction Instructions
let Predicates = [HasVInstructionsAnyF] in {
defm : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", /*is_float*/1>;
} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructionsAnyF] in {

// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fadd_vl, "PseudoVFADD">;
defm : VPatBinaryFPVL_VV_VF<riscv_fsub_vl, "PseudoVFSUB">;
defm : VPatBinaryFPVL_R_VF<riscv_fsub_vl, "PseudoVFRSUB">;
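// VPatBinaryFPVL_R_VF matches the splatted scalar in the first (reversed)
// operand position, which is how vfrsub.vf above and vfrdiv.vf below are
// selected.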
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fmul_vl, "PseudoVFMUL">;
defm : VPatBinaryFPVL_VV_VF<riscv_fdiv_vl, "PseudoVFDIV">;
defm : VPatBinaryFPVL_R_VF<riscv_fdiv_vl, "PseudoVFRDIV">;

// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
foreach vti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = vti.LMul.MX;
  def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
                                      vti.RegClass:$rs2, (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
                                      (riscv_fneg_vl vti.RegClass:$rs2,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      vti.RegClass:$rd,
                                      (riscv_fneg_vl vti.RegClass:$rs2,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      vti.RegClass:$rd, vti.RegClass:$rs2,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
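  // In the patterns above, a negated addend selects vfmsub, negating both the
  // addend and one multiplicand selects vfnmadd, and negating only a
  // multiplicand selects vfnmsub; the fneg folds into the fused form rather
  // than being emitted as a separate sign-injection.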
  // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
  // commutable.
  def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                      vti.RegClass:$rd, vti.RegClass:$rs2,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFMADD_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                      vti.RegClass:$rd,
                                      (riscv_fneg_vl vti.RegClass:$rs2,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFMSUB_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                      (riscv_fneg_vl vti.RegClass:$rd,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      (riscv_fneg_vl vti.RegClass:$rs2,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                      (riscv_fneg_vl vti.RegClass:$rd,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      vti.RegClass:$rs2,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  // The splat might be negated.
  def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      vti.RegClass:$rd,
                                      (riscv_fneg_vl vti.RegClass:$rs2,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                                      vti.RegClass:$rd, vti.RegClass:$rs2,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 14.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fminnum_vl, "PseudoVFMIN">;
defm : VPatBinaryFPVL_VV_VF<riscv_fmaxnum_vl, "PseudoVFMAX">;

// 14.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCVL_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

defm : VPatFPSetCCVL_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;

defm : VPatFPSetCCVL_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;

defm : VPatFPSetCCVL_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
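// Both the plain and the explicitly ordered form of each condition code map
// to the same vmf* compare above; when the splatted scalar appears as the
// first setcc operand, the swapped_op_inst_name selects the reversed
// comparison (e.g. vmfgt.vf for a less-than with the scalar on the left).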
foreach vti = AllFloatVectors in {
  // 14.8. Vector Floating-Point Square-Root Instruction
  def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask),
                            VLOpFrag),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;

  // 14.12. Vector Floating-Point Sign-Injection Instructions
  def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
                           VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
  // Handle fneg with VFSGNJN using the same input for both operands.
  def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
                           VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (vti.Vector vti.RegClass:$rs2),
                                (vti.Mask true_mask),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (riscv_fneg_vl vti.RegClass:$rs2,
                                               (vti.Mask true_mask),
                                               VLOpFrag),
                                (vti.Mask true_mask),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;

  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (SplatFPOp vti.ScalarRegClass:$rs2),
                                (vti.Mask true_mask),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
}

foreach fvti = AllFloatVectors in {
  // Floating-point vselects:
  // 12.15. Vector Integer Merge Instructions
  // 14.15. Vector Floating-Point Merge Instruction
  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           fvti.RegClass:$rs1,
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                 GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           (SplatFPOp fvti.ScalarRegClass:$rs1),
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2,
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
                 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           (SplatFPOp (fvti.Scalar fpimm0)),
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

  // 14.16. Vector Floating-Point Move Instruction
  // If we're splatting fpimm0, use vmv.v.i vd, 0 instead of vfmv.v.f.
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                          (fvti.Scalar (fpimm0)), VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                 0, GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                          (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                fvti.LMul.MX)
                 (fvti.Scalar fvti.ScalarRegClass:$rs2),
                 GPR:$vl, fvti.Log2SEW)>;
  // 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
  defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFCVT_RTZ_X_F_V">;
  defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFCVT_RTZ_XU_F_V">;
  defm : VPatConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;
  defm : VPatConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;

  // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
  defm : VPatWConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFWCVT_RTZ_X_F_V">;
  defm : VPatWConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
  defm : VPatWConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;
  defm : VPatWConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1),
                                              (fvti.Mask true_mask),
                                              VLOpFrag)),
              (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }

  // 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
  defm : VPatNConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFNCVT_RTZ_X_F_W">;
  defm : VPatNConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
  defm : VPatNConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;
  defm : VPatNConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1),
                                             (fwti.Mask true_mask),
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                   fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1),
                                               (fwti.Mask true_mask),
                                               VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX)
                   fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }
}

} // Predicates = [HasVInstructionsAnyF]

let Predicates = [HasVInstructions] in {

foreach mti = AllMasks in {
  // 16.1. Vector Mask-Register Logical Instructions
  def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
            (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
            (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
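  // riscv_vmnot_vl is just vmxor with an all-ones operand (see the PatFrag
  // above), so the negations in the patterns below fold into
  // vmandn/vmorn/vmxnor/vmnand/vmnor rather than being emitted as a separate
  // vmnot.m.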
  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                      (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                     (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                     VLOpFrag)),
            (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  // XOR is associative so we need 2 patterns for VMXNOR.
  def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                      VLOpFrag),
                                      VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                      VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                     VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                      VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  // Match the not idiom to the vmnot.m pseudo.
  def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                 VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

  // 16.2. Vector count population in mask vcpop.m
  def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
                 VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
                 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
}

} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructions] in {
// 17.1. Integer Scalar Move Instructions
// 17.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                          vti.ScalarRegClass:$rs1,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                 vti.RegClass:$merge,
                 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Mask true_mask),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                              (vti.Mask true_mask),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
                                              (vti.Mask true_mask),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (riscv_vrgather_vv_vl
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$rs1,
                                           (vti.Mask true_mask),
                                           VLOpFrag),
                                          vti.RegClass:$merge,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (riscv_vrgather_vx_vl
                                           vti.RegClass:$rs2,
                                           uimm5:$imm,
                                           (vti.Mask true_mask),
                                           VLOpFrag),
                                          vti.RegClass:$merge,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  // emul = lmul * 16 / sew
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;
    def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                                    (ivti.Vector ivti.RegClass:$rs1),
                                                    (vti.Mask true_mask),
                                                    VLOpFrag)),
              (!cast<Instruction>(inst)
                   vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                            (riscv_vrgatherei16_vv_vl
                                             vti.RegClass:$rs2,
                                             (ivti.Vector ivti.RegClass:$rs1),
                                             (vti.Mask true_mask),
                                             VLOpFrag),
                                            vti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
// 17.2. Floating-Point Scalar Move Instructions
foreach vti = AllFloatVectors in {
  def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                           vti.ScalarRegClass:$rs1,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$merge,
                 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                              (ivti.Vector vti.RegClass:$rs1),
                                              (vti.Mask true_mask),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                              (vti.Mask true_mask),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
                                              (vti.Mask true_mask),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (riscv_vrgather_vv_vl
                                           vti.RegClass:$rs2,
                                           (ivti.Vector vti.RegClass:$rs1),
                                           (vti.Mask true_mask),
                                           VLOpFrag),
                                          vti.RegClass:$merge,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (riscv_vrgather_vx_vl
                                           vti.RegClass:$rs2,
                                           uimm5:$imm,
                                           (vti.Mask true_mask),
                                           VLOpFrag),
                                          vti.RegClass:$merge,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;
    def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                                    (ivti.Vector ivti.RegClass:$rs1),
                                                    (vti.Mask true_mask),
                                                    VLOpFrag)),
              (!cast<Instruction>(inst)
                   vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                            (riscv_vrgatherei16_vv_vl
                                             vti.RegClass:$rs2,
                                             (ivti.Vector ivti.RegClass:$rs1),
                                             (vti.Mask true_mask),
                                             VLOpFrag),
                                            vti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;
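// In the slide profiles below, SDTRVVSlide takes the vector being slid into
// and the source vector plus an XLenVT offset, while SDTRVVSlide1 takes a
// single source vector plus an XLenVT scalar; both are followed by the mask
// and VL operands, matching the patterns that follow.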
def SDTRVVSlide : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 4, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisInt<0>, SDTCisVT<2, XLenVT>,
  SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT>
]>;

def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
def riscv_slidedown_vl : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;

let Predicates = [HasVInstructions] in {

foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1),
                                           GPR:$rs2, (vti.Mask true_mask),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1),
                                             GPR:$rs2, (vti.Mask true_mask),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}

foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
  def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
                                          (vti.Vector vti.RegClass:$rs1),
                                          uimm5:$rs2, (vti.Mask true_mask),
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
                 GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
                                          (vti.Vector vti.RegClass:$rs1),
                                          GPR:$rs2, (vti.Mask true_mask),
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
                 GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                            (vti.Vector vti.RegClass:$rs1),
                                            uimm5:$rs2, (vti.Mask true_mask),
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
                 GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                            (vti.Vector vti.RegClass:$rs1),
                                            GPR:$rs2, (vti.Mask true_mask),
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
                 GPR:$vl, vti.Log2SEW)>;
}

} // Predicates = [HasVInstructions]