1//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file contains the required infrastructure and VL patterns to 10/// support code generation for the standard 'V' (Vector) extension, version 11/// version 1.0. 12/// 13/// This file is included from and depends upon RISCVInstrInfoVPseudos.td 14/// 15/// Note: the patterns for RVV intrinsics are found in 16/// RISCVInstrInfoVPseudos.td. 17/// 18//===----------------------------------------------------------------------===// 19 20//===----------------------------------------------------------------------===// 21// Helpers to define the VL patterns. 22//===----------------------------------------------------------------------===// 23 24def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, 25 SDTCisSameAs<0, 2>, 26 SDTCisVec<0>, SDTCisInt<0>, 27 SDTCVecEltisVT<3, i1>, 28 SDTCisSameNumEltsAs<0, 3>, 29 SDTCisVT<4, XLenVT>]>; 30 31def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 32 SDTCisVec<0>, SDTCisFP<0>, 33 SDTCVecEltisVT<2, i1>, 34 SDTCisSameNumEltsAs<0, 2>, 35 SDTCisVT<3, XLenVT>]>; 36def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, 37 SDTCisSameAs<0, 2>, 38 SDTCisVec<0>, SDTCisFP<0>, 39 SDTCVecEltisVT<3, i1>, 40 SDTCisSameNumEltsAs<0, 3>, 41 SDTCisVT<4, XLenVT>]>; 42 43def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL", 44 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, 45 SDTCisSameAs<0, 1>, 46 SDTCisVT<2, XLenVT>, 47 SDTCisVT<3, XLenVT>]>>; 48def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL", 49 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, 50 SDTCisSameAs<0, 1>, 51 SDTCisEltOfVec<2, 0>, 52 SDTCisVT<3, XLenVT>]>>; 53def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL", 54 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 55 SDTCisInt<0>, 56 SDTCisVT<2, XLenVT>, 57 SDTCisVT<3, XLenVT>]>>; 58def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL", 59 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 60 SDTCisFP<0>, 61 SDTCisEltOfVec<2, 0>, 62 SDTCisVT<3, XLenVT>]>>; 63 64def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 65def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>; 66def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 67def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 68def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 69def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 70def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 71def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 72def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>; 73def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>; 74def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>; 75def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>; 76def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>; 77def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>; 78def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>; 79def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 80def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 81def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 82def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 83 84def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 85def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 86def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; 87def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>; 88 89def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 90def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>; 91def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 92def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>; 93def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>; 94def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>; 95def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>; 96def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVFPBinOp_VL>; 97def riscv_fminnum_vl : SDNode<"RISCVISD::FMINNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 98def riscv_fmaxnum_vl : SDNode<"RISCVISD::FMAXNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 99 100def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 101 SDTCisSameAs<0, 2>, 102 SDTCisSameAs<0, 3>, 103 SDTCisVec<0>, SDTCisFP<0>, 104 SDTCVecEltisVT<4, i1>, 105 SDTCisSameNumEltsAs<0, 4>, 106 SDTCisVT<5, XLenVT>]>; 107def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 108def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 109def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 110def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 111 112def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [ 113 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>, 114 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 115]>; 116def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [ 117 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, 118 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 119]>; 120 121def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; 122def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; 123def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; 124 125def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [ 126 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 127 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 128]>; 129def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [ 130 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 131 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 132]>; 133 134def riscv_fp_to_sint_vl : SDNode<"RISCVISD::FP_TO_SINT_VL", SDT_RISCVFP2IOp_VL>; 135def riscv_fp_to_uint_vl : SDNode<"RISCVISD::FP_TO_UINT_VL", SDT_RISCVFP2IOp_VL>; 136def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 137def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 138 139def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", 140 SDTypeProfile<1, 5, [SDTCVecEltisVT<0, i1>, 141 SDTCisVec<1>, 142 SDTCisSameNumEltsAs<0, 1>, 143 SDTCisSameAs<1, 2>, 144 SDTCisVT<3, OtherVT>, 145 SDTCisSameAs<0, 4>, 146 SDTCisVT<5, XLenVT>]>>; 147 148def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL", 149 SDTypeProfile<1, 5, [SDTCisVec<0>, 150 SDTCisSameAs<0, 1>, 151 SDTCisVT<2, XLenVT>, 152 SDTCVecEltisVT<3, i1>, 153 SDTCisSameNumEltsAs<0, 3>, 154 SDTCisSameAs<0, 4>, 155 SDTCisVT<5, XLenVT>]>>; 156def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL", 157 SDTypeProfile<1, 5, [SDTCisVec<0>, 158 SDTCisSameAs<0, 1>, 159 SDTCisInt<2>, 160 SDTCisSameNumEltsAs<0, 2>, 161 SDTCisSameSizeAs<0, 2>, 162 SDTCVecEltisVT<3, i1>, 163 SDTCisSameNumEltsAs<0, 3>, 164 SDTCisSameAs<0, 4>, 165 SDTCisVT<5, XLenVT>]>>; 166def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL", 167 SDTypeProfile<1, 5, [SDTCisVec<0>, 168 SDTCisSameAs<0, 1>, 169 SDTCisInt<2>, 170 SDTCVecEltisVT<2, i16>, 171 SDTCisSameNumEltsAs<0, 2>, 172 SDTCVecEltisVT<3, i1>, 173 SDTCisSameNumEltsAs<0, 3>, 174 SDTCisSameAs<0, 4>, 175 SDTCisVT<5, XLenVT>]>>; 176 177def SDT_RISCVSelect_VL : SDTypeProfile<1, 4, [ 178 SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>, 179 SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisVT<4, XLenVT> 180]>; 181 182def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL", SDT_RISCVSelect_VL>; 183def riscv_vp_merge_vl : SDNode<"RISCVISD::VP_MERGE_VL", SDT_RISCVSelect_VL>; 184 185def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, 186 SDTCisVT<1, XLenVT>]>; 187def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>; 188def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>; 189 190def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 191 SDTCisSameAs<0, 2>, 192 SDTCVecEltisVT<0, i1>, 193 SDTCisVT<3, XLenVT>]>; 194def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 195def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 196def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 197 198def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>; 199 200def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), 201 (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; 202 203def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL", 204 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 205 SDTCisVec<1>, SDTCisInt<1>, 206 SDTCVecEltisVT<2, i1>, 207 SDTCisSameNumEltsAs<1, 2>, 208 SDTCisVT<3, XLenVT>]>>; 209 210def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, 211 SDTCisSameNumEltsAs<0, 1>, 212 SDTCisSameNumEltsAs<1, 2>, 213 SDTCVecEltisVT<2, i1>, 214 SDTCisVT<3, XLenVT>]>; 215def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>; 216def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>; 217 218def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL", 219 SDTypeProfile<1, 3, [SDTCisVec<0>, 220 SDTCisSameNumEltsAs<0, 1>, 221 SDTCisSameNumEltsAs<0, 2>, 222 SDTCVecEltisVT<2, i1>, 223 SDTCisVT<3, XLenVT>]>>; 224 225def SDT_RISCVVWBinOp_VL : SDTypeProfile<1, 4, [SDTCisVec<0>, 226 SDTCisSameNumEltsAs<0, 1>, 227 SDTCisSameAs<1, 2>, 228 SDTCisSameNumEltsAs<1, 3>, 229 SDTCVecEltisVT<3, i1>, 230 SDTCisVT<4, XLenVT>]>; 231def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 232def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 233def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWBinOp_VL>; 234def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 235def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 236def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 237def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 238 239def SDT_RISCVVWBinOpW_VL : SDTypeProfile<1, 4, [SDTCisVec<0>, 240 SDTCisSameAs<0, 1>, 241 SDTCisSameNumEltsAs<1, 2>, 242 SDTCisOpSmallerThanOp<2, 1>, 243 SDTCisSameNumEltsAs<1, 3>, 244 SDTCVecEltisVT<3, i1>, 245 SDTCisVT<4, XLenVT>]>; 246def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWBinOpW_VL>; 247def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWBinOpW_VL>; 248def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWBinOpW_VL>; 249def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWBinOpW_VL>; 250 251def SDTRVVVecReduce : SDTypeProfile<1, 5, [ 252 SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>, 253 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT> 254]>; 255 256def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D), 257 (riscv_mul_vl node:$A, node:$B, node:$C, 258 node:$D), [{ 259 return N->hasOneUse(); 260}]>; 261 262def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D), 263 (riscv_vwmul_vl node:$A, node:$B, node:$C, 264 node:$D), [{ 265 return N->hasOneUse(); 266}]>; 267 268def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D), 269 (riscv_vwmulu_vl node:$A, node:$B, node:$C, 270 node:$D), [{ 271 return N->hasOneUse(); 272}]>; 273 274def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D), 275 (riscv_vwmulsu_vl node:$A, node:$B, node:$C, 276 node:$D), [{ 277 return N->hasOneUse(); 278}]>; 279 280def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 281 (riscv_sext_vl node:$A, node:$B, node:$C), [{ 282 return N->hasOneUse(); 283}]>; 284 285def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 286 (riscv_zext_vl node:$A, node:$B, node:$C), [{ 287 return N->hasOneUse(); 288}]>; 289 290def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 291 (riscv_fpextend_vl node:$A, node:$B, node:$C), [{ 292 return N->hasOneUse(); 293}]>; 294 295foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR", 296 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in 297 def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>; 298 299// Give explicit Complexity to prefer simm5/uimm5. 300def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>; 301def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 2>; 302def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [], [], 2>; 303def SplatPat_simm5_plus1 304 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 2>; 305def SplatPat_simm5_plus1_nonzero 306 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 2>; 307 308// Ignore the vl operand. 309def SplatFPOp : PatFrag<(ops node:$op), 310 (riscv_vfmv_v_f_vl undef, node:$op, srcvalue)>; 311 312def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>; 313def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>; 314def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>; 315def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>; 316 317multiclass VPatBinaryVL_V<SDNode vop, 318 string instruction_name, 319 string suffix, 320 ValueType result_type, 321 ValueType op1_type, 322 ValueType op2_type, 323 ValueType mask_type, 324 int sew, 325 LMULInfo vlmul, 326 VReg op1_reg_class, 327 VReg op2_reg_class> { 328 def : Pat<(result_type (vop 329 (op1_type op1_reg_class:$rs1), 330 (op2_type op2_reg_class:$rs2), 331 (mask_type V0), 332 VLOpFrag)), 333 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK") 334 (result_type (IMPLICIT_DEF)), 335 op1_reg_class:$rs1, 336 op2_reg_class:$rs2, 337 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; 338} 339 340multiclass VPatBinaryVL_XI<SDNode vop, 341 string instruction_name, 342 string suffix, 343 ValueType result_type, 344 ValueType vop1_type, 345 ValueType vop2_type, 346 ValueType mask_type, 347 int sew, 348 LMULInfo vlmul, 349 VReg vop_reg_class, 350 ComplexPattern SplatPatKind, 351 DAGOperand xop_kind> { 352 def : Pat<(result_type (vop 353 (vop1_type vop_reg_class:$rs1), 354 (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), 355 (mask_type V0), 356 VLOpFrag)), 357 (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK") 358 (result_type (IMPLICIT_DEF)), 359 vop_reg_class:$rs1, 360 xop_kind:$rs2, 361 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; 362} 363 364multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> { 365 foreach vti = AllIntegerVectors in { 366 defm : VPatBinaryVL_V<vop, instruction_name, "VV", 367 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 368 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>; 369 defm : VPatBinaryVL_XI<vop, instruction_name, "VX", 370 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 371 vti.Log2SEW, vti.LMul, vti.RegClass, SplatPat, GPR>; 372 } 373} 374 375multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name, 376 Operand ImmType = simm5> 377 : VPatBinaryVL_VV_VX<vop, instruction_name> { 378 foreach vti = AllIntegerVectors in { 379 defm : VPatBinaryVL_XI<vop, instruction_name, "VI", 380 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 381 vti.Log2SEW, vti.LMul, vti.RegClass, 382 !cast<ComplexPattern>(SplatPat#_#ImmType), 383 ImmType>; 384 } 385} 386 387multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> { 388 foreach VtiToWti = AllWidenableIntVectors in { 389 defvar vti = VtiToWti.Vti; 390 defvar wti = VtiToWti.Wti; 391 defm : VPatBinaryVL_V<vop, instruction_name, "VV", 392 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 393 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>; 394 defm : VPatBinaryVL_XI<vop, instruction_name, "VX", 395 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 396 vti.Log2SEW, vti.LMul, vti.RegClass, SplatPat, GPR>; 397 } 398} 399multiclass VPatBinaryWVL_VV_VX_WV_WX<SDNode vop, SDNode vop_w, 400 string instruction_name> 401 : VPatBinaryWVL_VV_VX<vop, instruction_name> { 402 foreach VtiToWti = AllWidenableIntVectors in { 403 defvar vti = VtiToWti.Vti; 404 defvar wti = VtiToWti.Wti; 405 defm : VPatBinaryVL_V<vop_w, instruction_name, "WV", 406 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 407 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass>; 408 defm : VPatBinaryVL_XI<vop_w, instruction_name, "WX", 409 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 410 vti.Log2SEW, vti.LMul, wti.RegClass, SplatPat, GPR>; 411 } 412} 413 414multiclass VPatBinaryVL_VF<SDNode vop, 415 string instruction_name, 416 ValueType result_type, 417 ValueType vop_type, 418 ValueType mask_type, 419 int sew, 420 LMULInfo vlmul, 421 VReg vop_reg_class, 422 RegisterClass scalar_reg_class> { 423 def : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), 424 (vop_type (SplatFPOp scalar_reg_class:$rs2)), 425 (mask_type V0), 426 VLOpFrag)), 427 (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK") 428 (result_type (IMPLICIT_DEF)), 429 vop_reg_class:$rs1, 430 scalar_reg_class:$rs2, 431 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; 432} 433 434multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> { 435 foreach vti = AllFloatVectors in { 436 defm : VPatBinaryVL_V<vop, instruction_name, "VV", 437 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 438 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>; 439 defm : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 440 vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, 441 vti.LMul, vti.RegClass, vti.ScalarRegClass>; 442 } 443} 444 445multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> { 446 foreach fvti = AllFloatVectors in { 447 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 448 fvti.RegClass:$rs1, 449 (fvti.Mask V0), 450 VLOpFrag)), 451 (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 452 (fvti.Vector (IMPLICIT_DEF)), 453 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 454 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 455 } 456} 457 458multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name, 459 CondCode cc> { 460 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 461 vti.RegClass:$rs2, cc, 462 (vti.Mask V0), 463 VLOpFrag)), 464 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 465 (vti.Mask (IMPLICIT_DEF)), 466 vti.RegClass:$rs1, 467 vti.RegClass:$rs2, 468 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 469} 470 471// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 472multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name, 473 CondCode cc, CondCode invcc> 474 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> { 475 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), 476 vti.RegClass:$rs1, invcc, 477 (vti.Mask V0), 478 VLOpFrag)), 479 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 480 (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, 481 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 482} 483 484multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name, 485 CondCode cc, CondCode invcc> { 486 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); 487 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 488 (SplatPat (XLenVT GPR:$rs2)), cc, 489 (vti.Mask V0), 490 VLOpFrag)), 491 (instruction_masked (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, 492 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 493 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), 494 (vti.Vector vti.RegClass:$rs1), invcc, 495 (vti.Mask V0), 496 VLOpFrag)), 497 (instruction_masked (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, 498 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 499} 500 501multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name, 502 CondCode cc, CondCode invcc> { 503 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 504 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 505 (SplatPat_simm5 simm5:$rs2), cc, 506 (vti.Mask V0), 507 VLOpFrag)), 508 (instruction_masked (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, 509 XLenVT:$rs2, (vti.Mask V0), GPR:$vl, 510 vti.Log2SEW)>; 511 512 // FIXME: Can do some canonicalization to remove these patterns. 513 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2), 514 (vti.Vector vti.RegClass:$rs1), invcc, 515 (vti.Mask V0), 516 VLOpFrag)), 517 (instruction_masked (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, 518 simm5:$rs2, (vti.Mask V0), GPR:$vl, 519 vti.Log2SEW)>; 520} 521 522multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti, 523 string instruction_name, 524 CondCode cc, CondCode invcc, 525 ComplexPattern splatpat_kind> { 526 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 527 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 528 (splatpat_kind simm5:$rs2), cc, 529 (vti.Mask V0), 530 VLOpFrag)), 531 (instruction_masked (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, 532 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 533 vti.Log2SEW)>; 534 535 // FIXME: Can do some canonicalization to remove these patterns. 536 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), 537 (vti.Vector vti.RegClass:$rs1), invcc, 538 (vti.Mask V0), 539 VLOpFrag)), 540 (instruction_masked (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, 541 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 542 vti.Log2SEW)>; 543} 544 545multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc, 546 string inst_name, 547 string swapped_op_inst_name> { 548 foreach fvti = AllFloatVectors in { 549 def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1), 550 fvti.RegClass:$rs2, 551 cc, 552 (fvti.Mask V0), 553 VLOpFrag)), 554 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") 555 (fvti.Mask (IMPLICIT_DEF)), fvti.RegClass:$rs1, 556 fvti.RegClass:$rs2, (fvti.Mask V0), 557 GPR:$vl, fvti.Log2SEW)>; 558 def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1), 559 (SplatFPOp fvti.ScalarRegClass:$rs2), 560 cc, 561 (fvti.Mask V0), 562 VLOpFrag)), 563 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 564 (fvti.Mask (IMPLICIT_DEF)), fvti.RegClass:$rs1, 565 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 566 GPR:$vl, fvti.Log2SEW)>; 567 def : Pat<(fvti.Mask (riscv_setcc_vl (SplatFPOp fvti.ScalarRegClass:$rs2), 568 (fvti.Vector fvti.RegClass:$rs1), 569 cc, 570 (fvti.Mask V0), 571 VLOpFrag)), 572 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 573 (fvti.Mask (IMPLICIT_DEF)), fvti.RegClass:$rs1, 574 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 575 GPR:$vl, fvti.Log2SEW)>; 576 } 577} 578 579multiclass VPatExtendSDNode_V_VL<SDNode vop, string inst_name, string suffix, 580 list <VTypeInfoToFraction> fraction_list> { 581 foreach vtiTofti = fraction_list in { 582 defvar vti = vtiTofti.Vti; 583 defvar fti = vtiTofti.Fti; 584 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), 585 (fti.Mask V0), VLOpFrag)), 586 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") 587 (vti.Vector (IMPLICIT_DEF)), 588 fti.RegClass:$rs2, 589 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 590 } 591} 592 593multiclass VPatConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> { 594 foreach fvti = AllFloatVectors in { 595 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 596 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 597 (fvti.Mask V0), 598 VLOpFrag)), 599 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 600 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 601 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TAIL_AGNOSTIC)>; 602 } 603} 604 605multiclass VPatConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> { 606 foreach fvti = AllFloatVectors in { 607 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 608 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 609 (ivti.Mask V0), 610 VLOpFrag)), 611 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 612 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 613 (ivti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 614 } 615} 616 617multiclass VPatWConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> { 618 foreach fvtiToFWti = AllWidenableFloatVectors in { 619 defvar fvti = fvtiToFWti.Vti; 620 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 621 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 622 (fvti.Mask V0), 623 VLOpFrag)), 624 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 625 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 626 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 627 } 628} 629 630multiclass VPatWConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> { 631 foreach vtiToWti = AllWidenableIntToFloatVectors in { 632 defvar ivti = vtiToWti.Vti; 633 defvar fwti = vtiToWti.Wti; 634 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 635 (ivti.Mask V0), 636 VLOpFrag)), 637 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 638 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 639 (ivti.Mask V0), GPR:$vl, ivti.Log2SEW, TAIL_AGNOSTIC)>; 640 } 641} 642 643multiclass VPatNConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> { 644 foreach vtiToWti = AllWidenableIntToFloatVectors in { 645 defvar vti = vtiToWti.Vti; 646 defvar fwti = vtiToWti.Wti; 647 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 648 (fwti.Mask V0), 649 VLOpFrag)), 650 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 651 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 652 (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 653 } 654} 655 656multiclass VPatNConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> { 657 foreach fvtiToFWti = AllWidenableFloatVectors in { 658 defvar fvti = fvtiToFWti.Vti; 659 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 660 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 661 (iwti.Mask V0), 662 VLOpFrag)), 663 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 664 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 665 (iwti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 666 } 667} 668 669multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> { 670 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 671 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 672 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2, 673 (vti.Mask true_mask), 674 VLOpFrag)), 675 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX) 676 (vti_m1.Vector VR:$merge), 677 (vti.Vector vti.RegClass:$rs1), 678 (vti_m1.Vector VR:$rs2), 679 GPR:$vl, vti.Log2SEW)>; 680 681 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2, 682 (vti.Mask V0), VLOpFrag)), 683 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK") 684 (vti_m1.Vector VR:$merge), 685 (vti.Vector vti.RegClass:$rs1), 686 (vti_m1.Vector VR:$rs2), 687 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 688 } 689} 690 691multiclass VPatBinarySDNodeExt_V_WV_WX<SDNode op, PatFrags extop, string instruction_name> { 692 foreach vtiToWti = AllWidenableIntVectors in { 693 defvar vti = vtiToWti.Vti; 694 defvar wti = vtiToWti.Wti; 695 def : Pat< 696 (vti.Vector 697 (riscv_trunc_vector_vl 698 (op (wti.Vector wti.RegClass:$rs2), 699 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))), 700 (vti.Mask true_mask), 701 VLOpFrag)), 702 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX) 703 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>; 704 def : Pat< 705 (vti.Vector 706 (riscv_trunc_vector_vl 707 (op (wti.Vector wti.RegClass:$rs2), 708 (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))), 709 (vti.Mask true_mask), 710 VLOpFrag)), 711 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 712 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>; 713 } 714} 715 716multiclass VPatBinarySDNode_V_WV_WX_WI<SDNode op, string instruction_name> { 717 defm : VPatBinarySDNodeExt_V_WV_WX<op, sext_oneuse, instruction_name>; 718 defm : VPatBinarySDNodeExt_V_WV_WX<op, zext_oneuse, instruction_name>; 719 foreach vtiToWti = AllWidenableIntVectors in { 720 defvar vti = vtiToWti.Vti; 721 defvar wti = vtiToWti.Wti; 722 def : Pat< 723 (vti.Vector 724 (riscv_trunc_vector_vl 725 (op (wti.Vector wti.RegClass:$rs2), 726 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), 727 VLOpFrag)), 728 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 729 wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW)>; 730 } 731} 732 733multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 734 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 735 defvar vti = vtiToWti.Vti; 736 defvar wti = vtiToWti.Wti; 737 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 738 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 739 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 740 VR:$rs2, (vti.Mask true_mask), VLOpFrag)), 741 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX) 742 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 743 (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>; 744 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 745 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 746 VR:$rs2, (vti.Mask V0), VLOpFrag)), 747 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK") 748 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 749 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 750 } 751} 752 753multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 754 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 755 defvar vti = vtiToWti.Vti; 756 defvar wti = vtiToWti.Wti; 757 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 758 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 759 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 760 VR:$rs2, (vti.Mask true_mask), VLOpFrag)), 761 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX) 762 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 763 (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>; 764 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 765 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 766 VR:$rs2, (vti.Mask V0), VLOpFrag)), 767 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK") 768 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 769 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 770 } 771} 772 773multiclass VPatWidenBinaryFPVL_VV_VF<SDNode op, PatFrags extop, string instruction_name> { 774 foreach fvtiToFWti = AllWidenableFloatVectors in { 775 defvar fvti = fvtiToFWti.Vti; 776 defvar fwti = fvtiToFWti.Wti; 777 def : Pat<(fwti.Vector (op (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs2), 778 (fvti.Mask true_mask), VLOpFrag)), 779 (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs1), 780 (fvti.Mask true_mask), VLOpFrag)), 781 (fwti.Mask true_mask), VLOpFrag)), 782 (!cast<Instruction>(instruction_name#"_VV_"#fvti.LMul.MX) 783 fvti.RegClass:$rs2, fvti.RegClass:$rs1, 784 GPR:$vl, fvti.Log2SEW)>; 785 def : Pat<(fwti.Vector (op (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs2), 786 (fvti.Mask true_mask), VLOpFrag)), 787 (fwti.Vector (extop (fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)), 788 (fvti.Mask true_mask), VLOpFrag)), 789 (fwti.Mask true_mask), VLOpFrag)), 790 (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 791 fvti.RegClass:$rs2, fvti.ScalarRegClass:$rs1, 792 GPR:$vl, fvti.Log2SEW)>; 793 } 794} 795 796multiclass VPatWidenBinaryFPVL_WV_WF<SDNode op, PatFrags extop, string instruction_name> { 797 foreach fvtiToFWti = AllWidenableFloatVectors in { 798 defvar fvti = fvtiToFWti.Vti; 799 defvar fwti = fvtiToFWti.Wti; 800 def : Pat<(fwti.Vector (op (fwti.Vector fwti.RegClass:$rs2), 801 (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs1), 802 (fvti.Mask true_mask), VLOpFrag)), 803 (fwti.Mask true_mask), VLOpFrag)), 804 (!cast<Instruction>(instruction_name#"_WV_"#fvti.LMul.MX) 805 fwti.RegClass:$rs2, fvti.RegClass:$rs1, 806 GPR:$vl, fvti.Log2SEW)>; 807 def : Pat<(fwti.Vector (op (fwti.Vector fwti.RegClass:$rs2), 808 (fwti.Vector (extop (fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)), 809 (fvti.Mask true_mask), VLOpFrag)), 810 (fwti.Mask true_mask), VLOpFrag)), 811 (!cast<Instruction>(instruction_name#"_W"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 812 fwti.RegClass:$rs2, fvti.ScalarRegClass:$rs1, 813 GPR:$vl, fvti.Log2SEW)>; 814 } 815} 816 817multiclass VPatWidenBinaryFPVL_VV_VF_WV_WF<SDNode op, string instruction_name> { 818 defm : VPatWidenBinaryFPVL_VV_VF<op, riscv_fpextend_vl_oneuse, instruction_name>; 819 defm : VPatWidenBinaryFPVL_WV_WF<op, riscv_fpextend_vl_oneuse, instruction_name>; 820} 821 822multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> { 823 foreach vtiToWti = AllWidenableIntVectors in { 824 defvar vti = vtiToWti.Vti; 825 defvar wti = vtiToWti.Wti; 826 def : Pat< 827 (vti.Vector 828 (riscv_trunc_vector_vl 829 (op (wti.Vector wti.RegClass:$rs2), 830 (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1)), 831 (vti.Mask true_mask), VLOpFrag)), 832 (wti.Mask true_mask), VLOpFrag), 833 (vti.Mask true_mask), VLOpFrag)), 834 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 835 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>; 836 } 837} 838 839multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> { 840 foreach vti = AllIntegerVectors in { 841 defvar suffix = vti.LMul.MX; 842 // NOTE: We choose VMADD because it has the most commuting freedom. So it 843 // works best with how TwoAddressInstructionPass tries commuting. 844 def : Pat<(vti.Vector 845 (op vti.RegClass:$rs2, 846 (riscv_mul_vl_oneuse vti.RegClass:$rs1, 847 vti.RegClass:$rd, 848 (vti.Mask true_mask), VLOpFrag), 849 (vti.Mask true_mask), VLOpFrag)), 850 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 851 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 852 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 853 // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally 854 // commutable. 855 def : Pat<(vti.Vector 856 (op vti.RegClass:$rs2, 857 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), 858 vti.RegClass:$rd, 859 (vti.Mask true_mask), VLOpFrag), 860 (vti.Mask true_mask), VLOpFrag)), 861 (!cast<Instruction>(instruction_name#"_VX_" # suffix) 862 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 863 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 864 } 865} 866 867multiclass VPatWidenMultiplyAddVL_VV_VX<PatFrag op1, string instruction_name> { 868 foreach vtiTowti = AllWidenableIntVectors in { 869 defvar vti = vtiTowti.Vti; 870 defvar wti = vtiTowti.Wti; 871 def : Pat<(wti.Vector 872 (riscv_add_vl wti.RegClass:$rd, 873 (op1 vti.RegClass:$rs1, 874 (vti.Vector vti.RegClass:$rs2), 875 (vti.Mask true_mask), VLOpFrag), 876 (vti.Mask true_mask), VLOpFrag)), 877 (!cast<Instruction>(instruction_name#"_VV_" # vti.LMul.MX) 878 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 879 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 880 def : Pat<(wti.Vector 881 (riscv_add_vl wti.RegClass:$rd, 882 (op1 (SplatPat XLenVT:$rs1), 883 (vti.Vector vti.RegClass:$rs2), 884 (vti.Mask true_mask), VLOpFrag), 885 (vti.Mask true_mask), VLOpFrag)), 886 (!cast<Instruction>(instruction_name#"_VX_" # vti.LMul.MX) 887 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 888 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 889 } 890} 891 892multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> { 893 foreach vtiTowti = AllWidenableIntVectors in { 894 defvar vti = vtiTowti.Vti; 895 defvar wti = vtiTowti.Wti; 896 def : Pat<(vti.Vector (riscv_trunc_vector_vl 897 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2), 898 true_mask, VLOpFrag)), true_mask, VLOpFrag)), 899 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 900 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 901 def : Pat<(vti.Vector (riscv_trunc_vector_vl 902 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2), 903 true_mask, VLOpFrag)), true_mask, VLOpFrag)), 904 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 905 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW)>; 906 } 907} 908 909multiclass VPatFPMulAddVL_VV_VF<SDNode vop, string instruction_name> { 910 foreach vti = AllFloatVectors in { 911 defvar suffix = vti.LMul.MX; 912 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 913 vti.RegClass:$rs2, (vti.Mask true_mask), 914 VLOpFrag)), 915 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 916 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 917 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 918 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 919 vti.RegClass:$rs2, (vti.Mask V0), 920 VLOpFrag)), 921 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 922 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 923 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 924 925 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 926 vti.RegClass:$rd, vti.RegClass:$rs2, 927 (vti.Mask true_mask), 928 VLOpFrag)), 929 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix) 930 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 931 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 932 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 933 vti.RegClass:$rd, vti.RegClass:$rs2, 934 (vti.Mask V0), 935 VLOpFrag)), 936 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 937 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 938 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 939 } 940} 941 942multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> { 943 foreach vtiToWti = AllWidenableFloatVectors in { 944 defvar vti = vtiToWti.Vti; 945 defvar wti = vtiToWti.Wti; 946 def : Pat<(vop 947 (wti.Vector (riscv_fpextend_vl_oneuse 948 (vti.Vector vti.RegClass:$rs1), 949 (vti.Mask true_mask), VLOpFrag)), 950 (wti.Vector (riscv_fpextend_vl_oneuse 951 (vti.Vector vti.RegClass:$rs2), 952 (vti.Mask true_mask), VLOpFrag)), 953 (wti.Vector wti.RegClass:$rd), (vti.Mask true_mask), 954 VLOpFrag), 955 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 956 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 957 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 958 def : Pat<(vop 959 (wti.Vector (riscv_fpextend_vl_oneuse 960 (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 961 (vti.Mask true_mask), VLOpFrag)), 962 (wti.Vector (riscv_fpextend_vl_oneuse 963 (vti.Vector vti.RegClass:$rs2), 964 (vti.Mask true_mask), VLOpFrag)), 965 (wti.Vector wti.RegClass:$rd), (vti.Mask true_mask), 966 VLOpFrag), 967 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 968 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 969 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 970 } 971} 972 973//===----------------------------------------------------------------------===// 974// Patterns. 975//===----------------------------------------------------------------------===// 976 977let Predicates = [HasVInstructions] in { 978 979// 12.1. Vector Single-Width Integer Add and Subtract 980defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">; 981defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">; 982// Handle VRSUB specially since it's the only integer binary op with reversed 983// pattern operands 984foreach vti = AllIntegerVectors in { 985 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 986 (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), 987 VLOpFrag), 988 (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK") 989 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2, 990 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 991 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)), 992 (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), 993 VLOpFrag), 994 (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK") 995 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2, 996 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 997} 998 999// 12.2. Vector Widening Integer Add/Subtract 1000defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">; 1001defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">; 1002defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">; 1003defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">; 1004 1005// 12.3. Vector Integer Extension 1006defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF2", 1007 AllFractionableVF2IntVectors>; 1008defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF2", 1009 AllFractionableVF2IntVectors>; 1010defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF4", 1011 AllFractionableVF4IntVectors>; 1012defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF4", 1013 AllFractionableVF4IntVectors>; 1014defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF8", 1015 AllFractionableVF8IntVectors>; 1016defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF8", 1017 AllFractionableVF8IntVectors>; 1018 1019// 12.5. Vector Bitwise Logical Instructions 1020defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">; 1021defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">; 1022defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">; 1023 1024// 12.6. Vector Single-Width Bit Shift Instructions 1025defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>; 1026defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>; 1027defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>; 1028 1029foreach vti = AllIntegerVectors in { 1030 // Emit shift by 1 as an add since it might be faster. 1031 def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1), 1032 (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), 1033 (vti.Mask true_mask), 1034 VLOpFrag), 1035 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 1036 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>; 1037} 1038 1039// 12.7. Vector Narrowing Integer Right Shift Instructions 1040defm : VPatBinarySDNode_V_WV_WX_WI<srl, "PseudoVNSRL">; 1041defm : VPatBinarySDNode_V_WV_WX_WI<sra, "PseudoVNSRA">; 1042 1043defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">; 1044defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">; 1045defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">; 1046defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">; 1047defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">; 1048defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">; 1049 1050foreach vtiTowti = AllWidenableIntVectors in { 1051 defvar vti = vtiTowti.Vti; 1052 defvar wti = vtiTowti.Wti; 1053 def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1), 1054 (vti.Mask V0), 1055 VLOpFrag)), 1056 (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX#"_MASK") 1057 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, X0, 1058 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1059} 1060 1061// 12.8. Vector Integer Comparison Instructions 1062foreach vti = AllIntegerVectors in { 1063 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>; 1064 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>; 1065 1066 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 1067 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 1068 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 1069 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 1070 1071 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 1072 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 1073 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 1074 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 1075 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 1076 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 1077 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 1078 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 1079 // There is no VMSGE(U)_VX instruction 1080 1081 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 1082 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 1083 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 1084 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 1085 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 1086 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 1087 1088 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT, 1089 SplatPat_simm5_plus1_nonzero>; 1090 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT, 1091 SplatPat_simm5_plus1_nonzero>; 1092 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE, 1093 SplatPat_simm5_plus1>; 1094 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE, 1095 SplatPat_simm5_plus1_nonzero>; 1096} // foreach vti = AllIntegerVectors 1097 1098// 12.9. Vector Integer Min/Max Instructions 1099defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">; 1100defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">; 1101defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">; 1102defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">; 1103 1104// 12.10. Vector Single-Width Integer Multiply Instructions 1105defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">; 1106defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH">; 1107defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU">; 1108 1109// 12.11. Vector Integer Divide Instructions 1110defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU">; 1111defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">; 1112defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">; 1113defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">; 1114 1115// 12.12. Vector Widening Integer Multiply Instructions 1116defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">; 1117defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">; 1118defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">; 1119 1120// 12.13 Vector Single-Width Integer Multiply-Add Instructions 1121defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">; 1122defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">; 1123 1124// 12.14. Vector Widening Integer Multiply-Add Instructions 1125defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmul_vl_oneuse, "PseudoVWMACC">; 1126defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmulu_vl_oneuse, "PseudoVWMACCU">; 1127defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmulsu_vl_oneuse, "PseudoVWMACCSU">; 1128foreach vtiTowti = AllWidenableIntVectors in { 1129 defvar vti = vtiTowti.Vti; 1130 defvar wti = vtiTowti.Wti; 1131 def : Pat<(wti.Vector 1132 (riscv_add_vl wti.RegClass:$rd, 1133 (riscv_vwmulsu_vl_oneuse (vti.Vector vti.RegClass:$rs1), 1134 (SplatPat XLenVT:$rs2), 1135 (vti.Mask true_mask), VLOpFrag), 1136 (vti.Mask true_mask), VLOpFrag)), 1137 (!cast<Instruction>("PseudoVWMACCUS_VX_" # vti.LMul.MX) 1138 wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1, 1139 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1140} 1141 1142// 12.15. Vector Integer Merge Instructions 1143foreach vti = AllIntegerVectors in { 1144 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0), 1145 vti.RegClass:$rs1, 1146 vti.RegClass:$rs2, 1147 VLOpFrag)), 1148 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX) 1149 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0), 1150 GPR:$vl, vti.Log2SEW)>; 1151 1152 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0), 1153 (SplatPat XLenVT:$rs1), 1154 vti.RegClass:$rs2, 1155 VLOpFrag)), 1156 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX) 1157 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1158 1159 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0), 1160 (SplatPat_simm5 simm5:$rs1), 1161 vti.RegClass:$rs2, 1162 VLOpFrag)), 1163 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX) 1164 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1165 1166 def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0), 1167 vti.RegClass:$rs1, 1168 vti.RegClass:$rs2, 1169 VLOpFrag)), 1170 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX#"_TU") 1171 vti.RegClass:$rs2, vti.RegClass:$rs2, vti.RegClass:$rs1, 1172 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1173 1174 def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0), 1175 (SplatPat XLenVT:$rs1), 1176 vti.RegClass:$rs2, 1177 VLOpFrag)), 1178 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX#"_TU") 1179 vti.RegClass:$rs2, vti.RegClass:$rs2, GPR:$rs1, 1180 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1181 1182 def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0), 1183 (SplatPat_simm5 simm5:$rs1), 1184 vti.RegClass:$rs2, 1185 VLOpFrag)), 1186 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX#"_TU") 1187 vti.RegClass:$rs2, vti.RegClass:$rs2, simm5:$rs1, 1188 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1189} 1190 1191// 12.16. Vector Integer Move Instructions 1192foreach vti = AllIntegerVectors in { 1193 def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), GPR:$rs2, VLOpFrag)), 1194 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX) 1195 $rs2, GPR:$vl, vti.Log2SEW)>; 1196 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, GPR:$rs2, VLOpFrag)), 1197 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX#"_TU") 1198 $passthru, $rs2, GPR:$vl, vti.Log2SEW)>; 1199 defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5"); 1200 def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), (ImmPat XLenVT:$imm5), 1201 VLOpFrag)), 1202 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX) 1203 XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>; 1204 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, (ImmPat XLenVT:$imm5), 1205 VLOpFrag)), 1206 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX#"_TU") 1207 $passthru, XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>; 1208} 1209 1210// 12.1. Vector Single-Width Saturating Add and Subtract 1211defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">; 1212defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">; 1213defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">; 1214defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">; 1215 1216} // Predicates = [HasVInstructions] 1217 1218// 15.1. Vector Single-Width Integer Reduction Instructions 1219let Predicates = [HasVInstructions] in { 1220defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", /*is_float*/0>; 1221defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", /*is_float*/0>; 1222defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", /*is_float*/0>; 1223defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", /*is_float*/0>; 1224defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", /*is_float*/0>; 1225defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", /*is_float*/0>; 1226defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", /*is_float*/0>; 1227defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", /*is_float*/0>; 1228 1229// 15.2. Vector Widening Integer Reduction Instructions 1230defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>; 1231defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>; 1232defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", /*is_float*/0>; 1233defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", /*is_float*/0>; 1234defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", /*is_float*/0>; 1235} // Predicates = [HasVInstructions] 1236 1237// 15.3. Vector Single-Width Floating-Point Reduction Instructions 1238let Predicates = [HasVInstructionsAnyF] in { 1239defm : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>; 1240defm : VPatReductionVL<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", /*is_float*/1>; 1241defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", /*is_float*/1>; 1242defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", /*is_float*/1>; 1243 1244// 15.4. Vector Widening Floating-Point Reduction Instructions 1245defm : VPatWidenReductionVL<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse, "PseudoVFWREDOSUM", /*is_float*/1>; 1246defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_SEQ_FADD_vl, riscv_fpextend_vl_oneuse, "PseudoVFWREDOSUM", /*is_float*/1>; 1247defm : VPatWidenReductionVL<rvv_vecreduce_FADD_vl, fpext_oneuse, "PseudoVFWREDUSUM", /*is_float*/1>; 1248defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_FADD_vl, riscv_fpextend_vl_oneuse, "PseudoVFWREDUSUM", /*is_float*/1>; 1249} // Predicates = [HasVInstructionsAnyF] 1250 1251let Predicates = [HasVInstructionsAnyF] in { 1252 1253// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions 1254defm : VPatBinaryFPVL_VV_VF<riscv_fadd_vl, "PseudoVFADD">; 1255defm : VPatBinaryFPVL_VV_VF<riscv_fsub_vl, "PseudoVFSUB">; 1256defm : VPatBinaryFPVL_R_VF<riscv_fsub_vl, "PseudoVFRSUB">; 1257 1258// 14.3. Vector Widening Floating-Point Add/Subtract Instructions 1259defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fadd_vl, "PseudoVFWADD">; 1260defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fsub_vl, "PseudoVFWSUB">; 1261 1262// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 1263defm : VPatBinaryFPVL_VV_VF<riscv_fmul_vl, "PseudoVFMUL">; 1264defm : VPatBinaryFPVL_VV_VF<riscv_fdiv_vl, "PseudoVFDIV">; 1265defm : VPatBinaryFPVL_R_VF<riscv_fdiv_vl, "PseudoVFRDIV">; 1266 1267// 14.5. Vector Widening Floating-Point Multiply Instructions 1268defm : VPatWidenBinaryFPVL_VV_VF<riscv_fmul_vl, riscv_fpextend_vl_oneuse, "PseudoVFWMUL">; 1269 1270// 14.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions. 1271defm : VPatFPMulAddVL_VV_VF<riscv_vfmadd_vl, "PseudoVFMADD">; 1272defm : VPatFPMulAddVL_VV_VF<riscv_vfmsub_vl, "PseudoVFMSUB">; 1273defm : VPatFPMulAddVL_VV_VF<riscv_vfnmadd_vl, "PseudoVFNMADD">; 1274defm : VPatFPMulAddVL_VV_VF<riscv_vfnmsub_vl, "PseudoVFNMSUB">; 1275 1276// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 1277defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfmadd_vl, "PseudoVFWMACC">; 1278defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfnmadd_vl, "PseudoVFWNMACC">; 1279defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfmsub_vl, "PseudoVFWMSAC">; 1280defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfnmsub_vl, "PseudoVFWNMSAC">; 1281 1282// 14.11. Vector Floating-Point MIN/MAX Instructions 1283defm : VPatBinaryFPVL_VV_VF<riscv_fminnum_vl, "PseudoVFMIN">; 1284defm : VPatBinaryFPVL_VV_VF<riscv_fmaxnum_vl, "PseudoVFMAX">; 1285 1286// 14.13. Vector Floating-Point Compare Instructions 1287defm : VPatFPSetCCVL_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 1288defm : VPatFPSetCCVL_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 1289 1290defm : VPatFPSetCCVL_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">; 1291defm : VPatFPSetCCVL_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">; 1292 1293defm : VPatFPSetCCVL_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">; 1294defm : VPatFPSetCCVL_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">; 1295 1296defm : VPatFPSetCCVL_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">; 1297defm : VPatFPSetCCVL_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">; 1298 1299foreach vti = AllFloatVectors in { 1300 // 14.8. Vector Floating-Point Square-Root Instruction 1301 def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), 1302 VLOpFrag), 1303 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX) 1304 vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; 1305 1306 // 14.12. Vector Floating-Point Sign-Injection Instructions 1307 def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask), 1308 VLOpFrag), 1309 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX) 1310 vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>; 1311 // Handle fneg with VFSGNJN using the same input for both operands. 1312 def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), 1313 VLOpFrag), 1314 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK") 1315 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, 1316 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1317 TAIL_AGNOSTIC)>; 1318 1319 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 1320 (vti.Vector vti.RegClass:$rs2), 1321 (vti.Mask true_mask), 1322 VLOpFrag), 1323 (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX) 1324 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; 1325 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 1326 (riscv_fneg_vl vti.RegClass:$rs2, 1327 (vti.Mask true_mask), 1328 VLOpFrag), 1329 (vti.Mask true_mask), 1330 VLOpFrag), 1331 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX) 1332 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; 1333 1334 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 1335 (SplatFPOp vti.ScalarRegClass:$rs2), 1336 (vti.Mask true_mask), 1337 VLOpFrag), 1338 (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX) 1339 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>; 1340} 1341 1342foreach fvti = AllFloatVectors in { 1343 // Floating-point vselects: 1344 // 12.15. Vector Integer Merge Instructions 1345 // 14.15. Vector Floating-Point Merge Instruction 1346 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0), 1347 fvti.RegClass:$rs1, 1348 fvti.RegClass:$rs2, 1349 VLOpFrag)), 1350 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 1351 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0), 1352 GPR:$vl, fvti.Log2SEW)>; 1353 1354 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0), 1355 (SplatFPOp fvti.ScalarRegClass:$rs1), 1356 fvti.RegClass:$rs2, 1357 VLOpFrag)), 1358 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 1359 fvti.RegClass:$rs2, 1360 (fvti.Scalar fvti.ScalarRegClass:$rs1), 1361 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 1362 1363 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0), 1364 (SplatFPOp (fvti.Scalar fpimm0)), 1365 fvti.RegClass:$rs2, 1366 VLOpFrag)), 1367 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 1368 fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 1369 1370 def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0), 1371 fvti.RegClass:$rs1, 1372 fvti.RegClass:$rs2, 1373 VLOpFrag)), 1374 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX#"_TU") 1375 fvti.RegClass:$rs2, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0), 1376 GPR:$vl, fvti.Log2SEW)>; 1377 1378 def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0), 1379 (SplatFPOp fvti.ScalarRegClass:$rs1), 1380 fvti.RegClass:$rs2, 1381 VLOpFrag)), 1382 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX#"_TU") 1383 fvti.RegClass:$rs2, fvti.RegClass:$rs2, 1384 (fvti.Scalar fvti.ScalarRegClass:$rs1), 1385 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 1386 1387 def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0), 1388 (SplatFPOp (fvti.Scalar fpimm0)), 1389 fvti.RegClass:$rs2, 1390 VLOpFrag)), 1391 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU") 1392 fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0), 1393 GPR:$vl, fvti.Log2SEW)>; 1394 1395 // 14.16. Vector Floating-Point Move Instruction 1396 // If we're splatting fpimm0, use vmv.v.x vd, x0. 1397 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 1398 (fvti.Vector undef), (fvti.Scalar (fpimm0)), VLOpFrag)), 1399 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX) 1400 0, GPR:$vl, fvti.Log2SEW)>; 1401 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 1402 fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)), 1403 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX#"_TU") 1404 $passthru, 0, GPR:$vl, fvti.Log2SEW)>; 1405 1406 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 1407 (fvti.Vector undef), (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), 1408 (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" # 1409 fvti.LMul.MX) 1410 (fvti.Scalar fvti.ScalarRegClass:$rs2), 1411 GPR:$vl, fvti.Log2SEW)>; 1412 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 1413 fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), 1414 (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" # 1415 fvti.LMul.MX # "_TU") 1416 $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), 1417 GPR:$vl, fvti.Log2SEW)>; 1418 1419 // 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions 1420 defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFCVT_RTZ_X_F_V">; 1421 defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFCVT_RTZ_XU_F_V">; 1422 defm : VPatConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">; 1423 defm : VPatConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">; 1424 1425 // 14.18. Widening Floating-Point/Integer Type-Convert Instructions 1426 defm : VPatWConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFWCVT_RTZ_X_F_V">; 1427 defm : VPatWConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFWCVT_RTZ_XU_F_V">; 1428 defm : VPatWConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">; 1429 defm : VPatWConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">; 1430 foreach fvtiToFWti = AllWidenableFloatVectors in { 1431 defvar fvti = fvtiToFWti.Vti; 1432 defvar fwti = fvtiToFWti.Wti; 1433 def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1), 1434 (fvti.Mask V0), 1435 VLOpFrag)), 1436 (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK") 1437 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1438 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1439 } 1440 1441 // 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions 1442 defm : VPatNConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFNCVT_RTZ_X_F_W">; 1443 defm : VPatNConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFNCVT_RTZ_XU_F_W">; 1444 defm : VPatNConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">; 1445 defm : VPatNConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">; 1446 foreach fvtiToFWti = AllWidenableFloatVectors in { 1447 defvar fvti = fvtiToFWti.Vti; 1448 defvar fwti = fvtiToFWti.Wti; 1449 def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1), 1450 (fwti.Mask V0), 1451 VLOpFrag)), 1452 (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK") 1453 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1454 (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1455 1456 def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1), 1457 (fwti.Mask V0), 1458 VLOpFrag)), 1459 (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK") 1460 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1461 (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1462 } 1463} 1464 1465} // Predicates = [HasVInstructionsAnyF] 1466 1467let Predicates = [HasVInstructions] in { 1468 1469foreach mti = AllMasks in { 1470 // 16.1 Vector Mask-Register Logical Instructions 1471 def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)), 1472 (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; 1473 def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)), 1474 (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; 1475 1476 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)), 1477 (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX) 1478 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1479 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)), 1480 (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX) 1481 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1482 def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)), 1483 (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX) 1484 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1485 1486 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, 1487 (riscv_vmnot_vl VR:$rs2, VLOpFrag), 1488 VLOpFrag)), 1489 (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX) 1490 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1491 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, 1492 (riscv_vmnot_vl VR:$rs2, VLOpFrag), 1493 VLOpFrag)), 1494 (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX) 1495 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1496 // XOR is associative so we need 2 patterns for VMXNOR. 1497 def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1, 1498 VLOpFrag), 1499 VR:$rs2, VLOpFrag)), 1500 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX) 1501 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1502 1503 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2, 1504 VLOpFrag), 1505 VLOpFrag)), 1506 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX) 1507 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1508 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2, 1509 VLOpFrag), 1510 VLOpFrag)), 1511 (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX) 1512 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1513 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2, 1514 VLOpFrag), 1515 VLOpFrag)), 1516 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX) 1517 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1518 1519 // Match the not idiom to the vmnot.m pseudo. 1520 def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)), 1521 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX) 1522 VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>; 1523 1524 // 16.2 Vector count population in mask vcpop.m 1525 def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask), 1526 VLOpFrag)), 1527 (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX) 1528 VR:$rs2, GPR:$vl, mti.Log2SEW)>; 1529 def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0), 1530 VLOpFrag)), 1531 (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK") 1532 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; 1533} 1534 1535} // Predicates = [HasVInstructions] 1536 1537let Predicates = [HasVInstructions] in { 1538// 17.1. Integer Scalar Move Instructions 1539// 17.4. Vector Register Gather Instruction 1540foreach vti = AllIntegerVectors in { 1541 def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge), 1542 vti.ScalarRegClass:$rs1, 1543 VLOpFrag)), 1544 (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX) 1545 vti.RegClass:$merge, 1546 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; 1547 1548 def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2, 1549 vti.RegClass:$rs1, 1550 (vti.Mask V0), 1551 vti.RegClass:$merge, 1552 VLOpFrag)), 1553 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK") 1554 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 1555 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1556 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, 1557 (vti.Mask V0), 1558 vti.RegClass:$merge, 1559 VLOpFrag)), 1560 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") 1561 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 1562 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1563 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, 1564 uimm5:$imm, 1565 (vti.Mask V0), 1566 vti.RegClass:$merge, 1567 VLOpFrag)), 1568 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") 1569 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, 1570 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1571 1572 // emul = lmul * 16 / sew 1573 defvar vlmul = vti.LMul; 1574 defvar octuple_lmul = vlmul.octuple; 1575 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); 1576 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 1577 defvar emul_str = octuple_to_str<octuple_emul>.ret; 1578 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str); 1579 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str; 1580 1581 def : Pat<(vti.Vector 1582 (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, 1583 (ivti.Vector ivti.RegClass:$rs1), 1584 (vti.Mask V0), 1585 vti.RegClass:$merge, 1586 VLOpFrag)), 1587 (!cast<Instruction>(inst#"_MASK") 1588 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, 1589 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1590 } 1591} 1592 1593} // Predicates = [HasVInstructions] 1594 1595let Predicates = [HasVInstructionsAnyF] in { 1596 1597// 17.2. Floating-Point Scalar Move Instructions 1598foreach vti = AllFloatVectors in { 1599 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 1600 (vti.Scalar (fpimm0)), 1601 VLOpFrag)), 1602 (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX) 1603 vti.RegClass:$merge, X0, GPR:$vl, vti.Log2SEW)>; 1604 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 1605 vti.ScalarRegClass:$rs1, 1606 VLOpFrag)), 1607 (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX) 1608 vti.RegClass:$merge, 1609 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; 1610 defvar ivti = GetIntVTypeInfo<vti>.Vti; 1611 1612 def : Pat<(vti.Vector 1613 (riscv_vrgather_vv_vl vti.RegClass:$rs2, 1614 (ivti.Vector vti.RegClass:$rs1), 1615 (vti.Mask V0), 1616 vti.RegClass:$merge, 1617 VLOpFrag)), 1618 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK") 1619 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 1620 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1621 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, 1622 (vti.Mask V0), 1623 vti.RegClass:$merge, 1624 VLOpFrag)), 1625 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") 1626 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 1627 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1628 def : Pat<(vti.Vector 1629 (riscv_vrgather_vx_vl vti.RegClass:$rs2, 1630 uimm5:$imm, 1631 (vti.Mask V0), 1632 vti.RegClass:$merge, 1633 VLOpFrag)), 1634 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") 1635 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, 1636 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1637 1638 defvar vlmul = vti.LMul; 1639 defvar octuple_lmul = vlmul.octuple; 1640 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); 1641 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 1642 defvar emul_str = octuple_to_str<octuple_emul>.ret; 1643 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str); 1644 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str; 1645 1646 def : Pat<(vti.Vector 1647 (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, 1648 (ivti.Vector ivti.RegClass:$rs1), 1649 (vti.Mask V0), 1650 vti.RegClass:$merge, 1651 VLOpFrag)), 1652 (!cast<Instruction>(inst#"_MASK") 1653 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, 1654 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1655 } 1656} 1657 1658} // Predicates = [HasVInstructionsAnyF] 1659 1660//===----------------------------------------------------------------------===// 1661// Miscellaneous RISCVISD SDNodes 1662//===----------------------------------------------------------------------===// 1663 1664def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2, 1665 [SDTCisVec<0>, SDTCVecEltisVT<1, i1>, 1666 SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>; 1667 1668def SDTRVVSlide : SDTypeProfile<1, 5, [ 1669 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>, 1670 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT> 1671]>; 1672def SDTRVVSlide1 : SDTypeProfile<1, 5, [ 1673 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>, 1674 SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, 1675 SDTCisVT<5, XLenVT> 1676]>; 1677 1678def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>; 1679def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>; 1680def riscv_slidedown_vl : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>; 1681def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>; 1682 1683let Predicates = [HasVInstructions] in { 1684 1685foreach vti = AllIntegerVectors in { 1686 def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask), 1687 VLOpFrag)), 1688 (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>; 1689 1690 def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef), 1691 (vti.Vector vti.RegClass:$rs1), 1692 GPR:$rs2, (vti.Mask true_mask), 1693 VLOpFrag)), 1694 (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) 1695 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 1696 def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd), 1697 (vti.Vector vti.RegClass:$rs1), 1698 GPR:$rs2, (vti.Mask true_mask), 1699 VLOpFrag)), 1700 (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU") 1701 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 1702 def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef), 1703 (vti.Vector vti.RegClass:$rs1), 1704 GPR:$rs2, (vti.Mask true_mask), 1705 VLOpFrag)), 1706 (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) 1707 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 1708 def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd), 1709 (vti.Vector vti.RegClass:$rs1), 1710 GPR:$rs2, (vti.Mask true_mask), 1711 VLOpFrag)), 1712 (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU") 1713 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 1714} 1715 1716foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in { 1717 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), 1718 (vti.Vector vti.RegClass:$rs1), 1719 uimm5:$rs2, (vti.Mask true_mask), 1720 VLOpFrag)), 1721 (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX) 1722 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, 1723 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1724 1725 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), 1726 (vti.Vector vti.RegClass:$rs1), 1727 GPR:$rs2, (vti.Mask true_mask), 1728 VLOpFrag)), 1729 (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX) 1730 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 1731 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1732 1733 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), 1734 (vti.Vector vti.RegClass:$rs1), 1735 uimm5:$rs2, (vti.Mask true_mask), 1736 VLOpFrag)), 1737 (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX) 1738 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, 1739 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1740 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector undef), 1741 (vti.Vector vti.RegClass:$rs1), 1742 uimm5:$rs2, (vti.Mask true_mask), 1743 VLOpFrag)), 1744 (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX) 1745 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, uimm5:$rs2, 1746 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1747 1748 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), 1749 (vti.Vector vti.RegClass:$rs1), 1750 GPR:$rs2, (vti.Mask true_mask), 1751 VLOpFrag)), 1752 (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX) 1753 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 1754 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1755 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector undef), 1756 (vti.Vector vti.RegClass:$rs1), 1757 GPR:$rs2, (vti.Mask true_mask), 1758 VLOpFrag)), 1759 (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX) 1760 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2, 1761 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1762} 1763 1764} // Predicates = [HasVInstructions] 1765