1//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file contains the required infrastructure and VL patterns to 10/// support code generation for the standard 'V' (Vector) extension, version 11/// version 1.0. 12/// 13/// This file is included from and depends upon RISCVInstrInfoVPseudos.td 14/// 15/// Note: the patterns for RVV intrinsics are found in 16/// RISCVInstrInfoVPseudos.td. 17/// 18//===----------------------------------------------------------------------===// 19 20//===----------------------------------------------------------------------===// 21// Helpers to define the VL patterns. 22//===----------------------------------------------------------------------===// 23 24def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 25 SDTCisSameAs<0, 2>, 26 SDTCisVec<0>, SDTCisInt<0>, 27 SDTCisSameAs<0, 3>, 28 SDTCVecEltisVT<4, i1>, 29 SDTCisSameNumEltsAs<0, 4>, 30 SDTCisVT<5, XLenVT>]>; 31 32def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 33 SDTCisVec<0>, SDTCisFP<0>, 34 SDTCVecEltisVT<2, i1>, 35 SDTCisSameNumEltsAs<0, 2>, 36 SDTCisVT<3, XLenVT>]>; 37def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 38 SDTCisSameAs<0, 2>, 39 SDTCisVec<0>, SDTCisFP<0>, 40 SDTCisSameAs<0, 3>, 41 SDTCVecEltisVT<4, i1>, 42 SDTCisSameNumEltsAs<0, 4>, 43 SDTCisVT<5, XLenVT>]>; 44 45def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 46 SDTCisSameAs<0, 2>, 47 SDTCisVec<0>, SDTCisFP<0>, 48 SDTCisSameAs<0, 3>, 49 SDTCVecEltisVT<4, i1>, 50 SDTCisSameNumEltsAs<0, 4>, 51 SDTCisVT<5, XLenVT>]>; 52 53def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL", 54 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, 55 SDTCisSameAs<0, 1>, 56 SDTCisVT<2, XLenVT>, 57 SDTCisVT<3, XLenVT>]>>; 58def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL", 59 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, 60 SDTCisSameAs<0, 1>, 61 SDTCisEltOfVec<2, 0>, 62 SDTCisVT<3, XLenVT>]>>; 63def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL", 64 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 65 SDTCisInt<0>, 66 SDTCisVT<2, XLenVT>, 67 SDTCisVT<3, XLenVT>]>>; 68def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL", 69 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 70 SDTCisFP<0>, 71 SDTCisEltOfVec<2, 0>, 72 SDTCisVT<3, XLenVT>]>>; 73 74def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 75def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>; 76def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 77def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 78def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 79def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 80def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 81def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 82def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>; 83def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>; 84def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>; 85def riscv_urem_vl 
: SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>; 86def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>; 87def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>; 88def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>; 89def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 90def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 91def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 92def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 93 94def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 95def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 96def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; 97def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>; 98 99def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 100def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>; 101def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 102def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>; 103def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>; 104def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>; 105def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>; 106def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; 107def riscv_fminnum_vl : SDNode<"RISCVISD::FMINNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 108def riscv_fmaxnum_vl : SDNode<"RISCVISD::FMAXNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 109 110def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 111 SDTCisSameAs<0, 2>, 112 SDTCisSameAs<0, 3>, 113 SDTCisVec<0>, SDTCisFP<0>, 114 SDTCVecEltisVT<4, i1>, 115 SDTCisSameNumEltsAs<0, 4>, 116 SDTCisVT<5, XLenVT>]>; 117def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 118def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 119def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 120def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 121 122def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [ 123 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>, 124 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 125]>; 126def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [ 127 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, 128 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 129]>; 130 131def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; 132def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; 133def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; 134 135def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [ 136 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 137 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 138]>; 139def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [ 140 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 141 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 142 SDTCisVT<4, XLenVT> // 
Rounding mode 143]>; 144 145def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [ 146 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 147 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 148]>; 149def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [ 150 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 151 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 152 SDTCisVT<4, XLenVT> // Rounding mode 153]>; 154 155// Float -> Int 156def riscv_vfcvt_xu_f_vl : SDNode<"RISCVISD::VFCVT_XU_F_VL", SDT_RISCVFP2IOp_VL>; 157def riscv_vfcvt_x_f_vl : SDNode<"RISCVISD::VFCVT_X_F_VL", SDT_RISCVFP2IOp_VL>; 158def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>; 159def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>; 160 161def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>; 162def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>; 163 164// Int -> Float 165def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 166def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 167def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>; 168def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>; 169 170 171def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>; 172 173def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", 174 SDTypeProfile<1, 6, [SDTCVecEltisVT<0, i1>, 175 SDTCisVec<1>, 176 SDTCisSameNumEltsAs<0, 1>, 177 SDTCisSameAs<1, 2>, 178 SDTCisVT<3, OtherVT>, 179 SDTCisSameAs<0, 4>, 180 SDTCisSameAs<0, 5>, 181 SDTCisVT<6, XLenVT>]>>; 182 183def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL", 184 SDTypeProfile<1, 5, [SDTCisVec<0>, 185 SDTCisSameAs<0, 1>, 186 SDTCisVT<2, XLenVT>, 187 SDTCisSameAs<0, 3>, 188 SDTCVecEltisVT<4, i1>, 189 SDTCisSameNumEltsAs<0, 4>, 190 SDTCisVT<5, XLenVT>]>>; 191def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL", 192 SDTypeProfile<1, 5, [SDTCisVec<0>, 193 SDTCisSameAs<0, 1>, 194 SDTCisInt<2>, 195 SDTCisSameNumEltsAs<0, 2>, 196 SDTCisSameSizeAs<0, 2>, 197 SDTCisSameAs<0, 3>, 198 SDTCVecEltisVT<4, i1>, 199 SDTCisSameNumEltsAs<0, 4>, 200 SDTCisVT<5, XLenVT>]>>; 201def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL", 202 SDTypeProfile<1, 5, [SDTCisVec<0>, 203 SDTCisSameAs<0, 1>, 204 SDTCisInt<2>, 205 SDTCVecEltisVT<2, i16>, 206 SDTCisSameNumEltsAs<0, 2>, 207 SDTCisSameAs<0, 3>, 208 SDTCVecEltisVT<4, i1>, 209 SDTCisSameNumEltsAs<0, 4>, 210 SDTCisVT<5, XLenVT>]>>; 211 212def SDT_RISCVSelect_VL : SDTypeProfile<1, 4, [ 213 SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>, 214 SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisVT<4, XLenVT> 215]>; 216 217def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL", SDT_RISCVSelect_VL>; 218def riscv_vp_merge_vl : SDNode<"RISCVISD::VP_MERGE_VL", SDT_RISCVSelect_VL>; 219 220def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, 221 SDTCisVT<1, XLenVT>]>; 222def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>; 223def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>; 224 225def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 226 SDTCisSameAs<0, 2>, 227 SDTCVecEltisVT<0, i1>, 228 SDTCisVT<3, XLenVT>]>; 229def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 230def 
riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 231def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 232 233def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>; 234 235def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), 236 (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; 237 238def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL", 239 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 240 SDTCisVec<1>, SDTCisInt<1>, 241 SDTCVecEltisVT<2, i1>, 242 SDTCisSameNumEltsAs<1, 2>, 243 SDTCisVT<3, XLenVT>]>>; 244 245def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL", 246 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 247 SDTCisVec<1>, SDTCisInt<1>, 248 SDTCVecEltisVT<2, i1>, 249 SDTCisSameNumEltsAs<1, 2>, 250 SDTCisVT<3, XLenVT>]>>; 251 252def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, 253 SDTCisSameNumEltsAs<0, 1>, 254 SDTCisSameNumEltsAs<1, 2>, 255 SDTCVecEltisVT<2, i1>, 256 SDTCisVT<3, XLenVT>]>; 257def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>; 258def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>; 259 260def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL", 261 SDTypeProfile<1, 3, [SDTCisVec<0>, 262 SDTCisSameNumEltsAs<0, 1>, 263 SDTCisSameNumEltsAs<0, 2>, 264 SDTCVecEltisVT<2, i1>, 265 SDTCisVT<3, XLenVT>]>>; 266 267def SDT_RISCVVWBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, 268 SDTCisSameNumEltsAs<0, 1>, 269 SDTCisSameAs<1, 2>, 270 SDTCisSameAs<0, 3>, 271 SDTCisSameNumEltsAs<1, 4>, 272 SDTCVecEltisVT<4, i1>, 273 SDTCisVT<5, XLenVT>]>; 274def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 275def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 276def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWBinOp_VL>; 277def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 278def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>; 279def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWBinOp_VL, []>; 280def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWBinOp_VL, []>; 281 282def SDT_RISCVVNBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, 283 SDTCisSameNumEltsAs<0, 1>, 284 SDTCisOpSmallerThanOp<0, 1>, 285 SDTCisSameAs<0, 2>, 286 SDTCisSameAs<0, 3>, 287 SDTCisSameNumEltsAs<0, 4>, 288 SDTCVecEltisVT<4, i1>, 289 SDTCisVT<5, XLenVT>]>; 290def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNBinOp_VL>; 291 292def SDT_RISCVVWBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, 293 SDTCisSameAs<0, 1>, 294 SDTCisSameNumEltsAs<1, 2>, 295 SDTCisOpSmallerThanOp<2, 1>, 296 SDTCisSameAs<0, 3>, 297 SDTCisSameNumEltsAs<1, 4>, 298 SDTCVecEltisVT<4, i1>, 299 SDTCisVT<5, XLenVT>]>; 300def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWBinOpW_VL>; 301def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWBinOpW_VL>; 302def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWBinOpW_VL>; 303def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWBinOpW_VL>; 304 305def SDTRVVVecReduce : SDTypeProfile<1, 5, [ 306 SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>, 307 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT> 308]>; 309 310def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 311 node:$E), 312 (riscv_add_vl node:$A, node:$B, node:$C, 313 node:$D, node:$E), [{ 314 return N->hasOneUse(); 315}]>; 
316 317def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 318 node:$E), 319 (riscv_sub_vl node:$A, node:$B, node:$C, 320 node:$D, node:$E), [{ 321 return N->hasOneUse(); 322}]>; 323 324def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 325 node:$E), 326 (riscv_mul_vl node:$A, node:$B, node:$C, 327 node:$D, node:$E), [{ 328 return N->hasOneUse(); 329}]>; 330 331def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 332 node:$E), 333 (riscv_vwmul_vl node:$A, node:$B, node:$C, 334 node:$D, node:$E), [{ 335 return N->hasOneUse(); 336}]>; 337 338def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 339 node:$E), 340 (riscv_vwmulu_vl node:$A, node:$B, node:$C, 341 node:$D, node:$E), [{ 342 return N->hasOneUse(); 343}]>; 344 345def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 346 node:$E), 347 (riscv_vwmulsu_vl node:$A, node:$B, node:$C, 348 node:$D, node:$E), [{ 349 return N->hasOneUse(); 350}]>; 351 352def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 353 (riscv_sext_vl node:$A, node:$B, node:$C), [{ 354 return N->hasOneUse(); 355}]>; 356 357def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 358 (riscv_zext_vl node:$A, node:$B, node:$C), [{ 359 return N->hasOneUse(); 360}]>; 361 362def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 363 (riscv_fpextend_vl node:$A, node:$B, node:$C), [{ 364 return N->hasOneUse(); 365}]>; 366 367def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 368 node:$E), 369 (riscv_vfmadd_vl node:$A, node:$B, 370 node:$C, node:$D, node:$E), [{ 371 return N->hasOneUse(); 372}]>; 373 374def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 375 node:$E), 376 (riscv_vfnmadd_vl node:$A, node:$B, 377 node:$C, node:$D, node:$E), [{ 378 return N->hasOneUse(); 379}]>; 380 381def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 382 node:$E), 383 (riscv_vfmsub_vl node:$A, node:$B, 384 node:$C, node:$D, node:$E), [{ 385 return N->hasOneUse(); 386}]>; 387 388def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 389 node:$E), 390 (riscv_vfnmsub_vl node:$A, node:$B, 391 node:$C, node:$D, node:$E), [{ 392 return N->hasOneUse(); 393}]>; 394 395foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR", 396 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in 397 def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>; 398 399// Give explicit Complexity to prefer simm5/uimm5. 400def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>; 401def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 2>; 402def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [], [], 2>; 403def SplatPat_simm5_plus1 404 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 2>; 405def SplatPat_simm5_plus1_nonzero 406 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 2>; 407 408// Ignore the vl operand. 
409def SplatFPOp : PatFrag<(ops node:$op), 410 (riscv_vfmv_v_f_vl undef, node:$op, srcvalue)>; 411 412def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>; 413def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>; 414def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>; 415def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>; 416 417multiclass VPatBinaryVL_V<SDNode vop, 418 string instruction_name, 419 string suffix, 420 ValueType result_type, 421 ValueType op1_type, 422 ValueType op2_type, 423 ValueType mask_type, 424 int sew, 425 LMULInfo vlmul, 426 VReg result_reg_class, 427 VReg op1_reg_class, 428 VReg op2_reg_class> { 429 def : Pat<(result_type (vop 430 (op1_type op1_reg_class:$rs1), 431 (op2_type op2_reg_class:$rs2), 432 (result_type result_reg_class:$merge), 433 (mask_type V0), 434 VLOpFrag)), 435 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK") 436 result_reg_class:$merge, 437 op1_reg_class:$rs1, 438 op2_reg_class:$rs2, 439 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; 440} 441 442multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop, 443 string instruction_name, 444 string suffix, 445 ValueType result_type, 446 ValueType op2_type, 447 int sew, 448 LMULInfo vlmul, 449 VReg result_reg_class, 450 VReg op2_reg_class> { 451 def : Pat<(result_type (vop 452 (result_type result_reg_class:$rs1), 453 (op2_type op2_reg_class:$rs2), 454 srcvalue, 455 true_mask, 456 VLOpFrag)), 457 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 458 result_reg_class:$rs1, 459 op2_reg_class:$rs2, 460 GPR:$vl, sew, TAIL_AGNOSTIC)>; 461 // Tail undisturbed 462 def : Pat<(riscv_vp_merge_vl true_mask, 463 (result_type (vop 464 result_reg_class:$rs1, 465 (op2_type op2_reg_class:$rs2), 466 srcvalue, 467 true_mask, 468 VLOpFrag)), 469 result_reg_class:$rs1, VLOpFrag), 470 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 471 result_reg_class:$rs1, 472 op2_reg_class:$rs2, 473 GPR:$vl, sew, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 474} 475 476multiclass VPatBinaryVL_XI<SDNode vop, 477 string instruction_name, 478 string suffix, 479 ValueType result_type, 480 ValueType vop1_type, 481 ValueType vop2_type, 482 ValueType mask_type, 483 int sew, 484 LMULInfo vlmul, 485 VReg result_reg_class, 486 VReg vop_reg_class, 487 ComplexPattern SplatPatKind, 488 DAGOperand xop_kind> { 489 def : Pat<(result_type (vop 490 (vop1_type vop_reg_class:$rs1), 491 (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), 492 (result_type result_reg_class:$merge), 493 (mask_type V0), 494 VLOpFrag)), 495 (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK") 496 result_reg_class:$merge, 497 vop_reg_class:$rs1, 498 xop_kind:$rs2, 499 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; 500} 501 502multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> { 503 foreach vti = AllIntegerVectors in { 504 defm : VPatBinaryVL_V<vop, instruction_name, "VV", 505 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 506 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 507 vti.RegClass>; 508 defm : VPatBinaryVL_XI<vop, instruction_name, "VX", 509 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 510 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 511 SplatPat, GPR>; 512 } 513} 514 515multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name, 516 Operand ImmType = simm5> 517 : VPatBinaryVL_VV_VX<vop, instruction_name> { 518 foreach vti = AllIntegerVectors in { 519 defm : VPatBinaryVL_XI<vop, instruction_name, "VI", 520 vti.Vector, 
vti.Vector, vti.Vector, vti.Mask, 521 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 522 !cast<ComplexPattern>(SplatPat#_#ImmType), 523 ImmType>; 524 } 525} 526 527multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> { 528 foreach VtiToWti = AllWidenableIntVectors in { 529 defvar vti = VtiToWti.Vti; 530 defvar wti = VtiToWti.Wti; 531 defm : VPatBinaryVL_V<vop, instruction_name, "VV", 532 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 533 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 534 vti.RegClass>; 535 defm : VPatBinaryVL_XI<vop, instruction_name, "VX", 536 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 537 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 538 SplatPat, GPR>; 539 } 540} 541multiclass VPatBinaryWVL_VV_VX_WV_WX<SDNode vop, SDNode vop_w, 542 string instruction_name> 543 : VPatBinaryWVL_VV_VX<vop, instruction_name> { 544 foreach VtiToWti = AllWidenableIntVectors in { 545 defvar vti = VtiToWti.Vti; 546 defvar wti = VtiToWti.Wti; 547 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 548 wti.Vector, vti.Vector, vti.Log2SEW, 549 vti.LMul, wti.RegClass, vti.RegClass>; 550 defm : VPatBinaryVL_V<vop_w, instruction_name, "WV", 551 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 552 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 553 vti.RegClass>; 554 defm : VPatBinaryVL_XI<vop_w, instruction_name, "WX", 555 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 556 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 557 SplatPat, GPR>; 558 } 559} 560 561multiclass VPatBinaryNVL_WV_WX_WI<SDNode vop, string instruction_name> { 562 foreach VtiToWti = AllWidenableIntVectors in { 563 defvar vti = VtiToWti.Vti; 564 defvar wti = VtiToWti.Wti; 565 defm : VPatBinaryVL_V<vop, instruction_name, "WV", 566 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 567 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 568 vti.RegClass>; 569 defm : VPatBinaryVL_XI<vop, instruction_name, "WX", 570 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 571 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 572 SplatPat, GPR>; 573 defm : VPatBinaryVL_XI<vop, instruction_name, "WI", 574 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 575 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 576 !cast<ComplexPattern>(SplatPat#_#uimm5), 577 uimm5>; 578 } 579} 580 581multiclass VPatBinaryVL_VF<SDNode vop, 582 string instruction_name, 583 ValueType result_type, 584 ValueType vop_type, 585 ValueType mask_type, 586 int sew, 587 LMULInfo vlmul, 588 VReg result_reg_class, 589 VReg vop_reg_class, 590 RegisterClass scalar_reg_class> { 591 def : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), 592 (vop_type (SplatFPOp scalar_reg_class:$rs2)), 593 (result_type result_reg_class:$merge), 594 (mask_type V0), 595 VLOpFrag)), 596 (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK") 597 result_reg_class:$merge, 598 vop_reg_class:$rs1, 599 scalar_reg_class:$rs2, 600 (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; 601} 602 603multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> { 604 foreach vti = AllFloatVectors in { 605 defm : VPatBinaryVL_V<vop, instruction_name, "VV", 606 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 607 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 608 vti.RegClass>; 609 defm : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 610 vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, 611 vti.LMul, vti.RegClass, vti.RegClass, 612 vti.ScalarRegClass>; 613 } 614} 615 616multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> { 617 foreach fvti = 
AllFloatVectors in { 618 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 619 fvti.RegClass:$rs1, 620 (fvti.Vector fvti.RegClass:$merge), 621 (fvti.Mask V0), 622 VLOpFrag)), 623 (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 624 fvti.RegClass:$merge, 625 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 626 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 627 } 628} 629 630multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name, 631 CondCode cc> { 632 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 633 vti.RegClass:$rs2, cc, 634 VR:$merge, 635 (vti.Mask V0), 636 VLOpFrag)), 637 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 638 VR:$merge, 639 vti.RegClass:$rs1, 640 vti.RegClass:$rs2, 641 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 642} 643 644// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 645multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name, 646 CondCode cc, CondCode invcc> 647 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> { 648 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), 649 vti.RegClass:$rs1, invcc, 650 VR:$merge, 651 (vti.Mask V0), 652 VLOpFrag)), 653 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 654 VR:$merge, vti.RegClass:$rs1, 655 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 656} 657 658multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name, 659 CondCode cc, CondCode invcc> { 660 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); 661 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 662 (SplatPat (XLenVT GPR:$rs2)), cc, 663 VR:$merge, 664 (vti.Mask V0), 665 VLOpFrag)), 666 (instruction_masked VR:$merge, vti.RegClass:$rs1, 667 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 668 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), 669 (vti.Vector vti.RegClass:$rs1), invcc, 670 VR:$merge, 671 (vti.Mask V0), 672 VLOpFrag)), 673 (instruction_masked VR:$merge, vti.RegClass:$rs1, 674 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 675} 676 677multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name, 678 CondCode cc, CondCode invcc> { 679 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 680 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 681 (SplatPat_simm5 simm5:$rs2), cc, 682 VR:$merge, 683 (vti.Mask V0), 684 VLOpFrag)), 685 (instruction_masked VR:$merge, vti.RegClass:$rs1, 686 XLenVT:$rs2, (vti.Mask V0), GPR:$vl, 687 vti.Log2SEW)>; 688 689 // FIXME: Can do some canonicalization to remove these patterns. 
690 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2), 691 (vti.Vector vti.RegClass:$rs1), invcc, 692 VR:$merge, 693 (vti.Mask V0), 694 VLOpFrag)), 695 (instruction_masked VR:$merge, vti.RegClass:$rs1, 696 simm5:$rs2, (vti.Mask V0), GPR:$vl, 697 vti.Log2SEW)>; 698} 699 700multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti, 701 string instruction_name, 702 CondCode cc, CondCode invcc, 703 ComplexPattern splatpat_kind> { 704 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 705 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 706 (splatpat_kind simm5:$rs2), cc, 707 VR:$merge, 708 (vti.Mask V0), 709 VLOpFrag)), 710 (instruction_masked VR:$merge, vti.RegClass:$rs1, 711 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 712 vti.Log2SEW)>; 713 714 // FIXME: Can do some canonicalization to remove these patterns. 715 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), 716 (vti.Vector vti.RegClass:$rs1), invcc, 717 VR:$merge, 718 (vti.Mask V0), 719 VLOpFrag)), 720 (instruction_masked VR:$merge, vti.RegClass:$rs1, 721 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 722 vti.Log2SEW)>; 723} 724 725multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc, 726 string inst_name, 727 string swapped_op_inst_name> { 728 foreach fvti = AllFloatVectors in { 729 def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1), 730 fvti.RegClass:$rs2, 731 cc, 732 VR:$merge, 733 (fvti.Mask V0), 734 VLOpFrag)), 735 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") 736 VR:$merge, fvti.RegClass:$rs1, 737 fvti.RegClass:$rs2, (fvti.Mask V0), 738 GPR:$vl, fvti.Log2SEW)>; 739 def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1), 740 (SplatFPOp fvti.ScalarRegClass:$rs2), 741 cc, 742 VR:$merge, 743 (fvti.Mask V0), 744 VLOpFrag)), 745 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 746 VR:$merge, fvti.RegClass:$rs1, 747 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 748 GPR:$vl, fvti.Log2SEW)>; 749 def : Pat<(fvti.Mask (riscv_setcc_vl (SplatFPOp fvti.ScalarRegClass:$rs2), 750 (fvti.Vector fvti.RegClass:$rs1), 751 cc, 752 VR:$merge, 753 (fvti.Mask V0), 754 VLOpFrag)), 755 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 756 VR:$merge, fvti.RegClass:$rs1, 757 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 758 GPR:$vl, fvti.Log2SEW)>; 759 } 760} 761 762multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix, 763 list <VTypeInfoToFraction> fraction_list> { 764 foreach vtiTofti = fraction_list in { 765 defvar vti = vtiTofti.Vti; 766 defvar fti = vtiTofti.Fti; 767 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), 768 (fti.Mask V0), VLOpFrag)), 769 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") 770 (vti.Vector (IMPLICIT_DEF)), 771 fti.RegClass:$rs2, 772 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 773 } 774} 775 776// Single width converting 777 778multiclass VPatConvertFP2IVL_V<SDNode vop, string instruction_name> { 779 foreach fvti = AllFloatVectors in { 780 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 781 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 782 (fvti.Mask V0), 783 VLOpFrag)), 784 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 785 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 786 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>; 787 } 788} 789 790multiclass VPatConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> { 791 foreach fvti = AllFloatVectors in { 792 defvar ivti = 
GetIntVTypeInfo<fvti>.Vti; 793 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 794 (fvti.Mask V0), (XLenVT timm:$frm), 795 VLOpFrag)), 796 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 797 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 798 (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, 799 TA_MA)>; 800 } 801} 802 803multiclass VPatConvertI2FPVL_V<SDNode vop, string instruction_name> { 804 foreach fvti = AllFloatVectors in { 805 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 806 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 807 (ivti.Mask V0), 808 VLOpFrag)), 809 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 810 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 811 (ivti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 812 } 813} 814 815multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 816 foreach fvti = AllFloatVectors in { 817 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 818 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 819 (ivti.Mask V0), (XLenVT timm:$frm), 820 VLOpFrag)), 821 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 822 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 823 (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 824 } 825} 826 827// Widening converting 828 829multiclass VPatWConvertFP2IVL_V<SDNode vop, string instruction_name> { 830 foreach fvtiToFWti = AllWidenableFloatVectors in { 831 defvar fvti = fvtiToFWti.Vti; 832 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 833 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 834 (fvti.Mask V0), 835 VLOpFrag)), 836 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 837 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 838 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 839 } 840} 841 842multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> { 843 foreach fvtiToFWti = AllWidenableFloatVectors in { 844 defvar fvti = fvtiToFWti.Vti; 845 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 846 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 847 (fvti.Mask V0), (XLenVT timm:$frm), 848 VLOpFrag)), 849 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 850 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 851 (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 852 } 853} 854 855multiclass VPatWConvertI2FPVL_V<SDNode vop, string instruction_name> { 856 foreach vtiToWti = AllWidenableIntToFloatVectors in { 857 defvar ivti = vtiToWti.Vti; 858 defvar fwti = vtiToWti.Wti; 859 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 860 (ivti.Mask V0), 861 VLOpFrag)), 862 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 863 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 864 (ivti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>; 865 } 866} 867 868multiclass VPatWConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 869 foreach vtiToWti = AllWidenableIntToFloatVectors in { 870 defvar ivti = vtiToWti.Vti; 871 defvar fwti = vtiToWti.Wti; 872 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 873 (ivti.Mask V0), (XLenVT timm:$frm), 874 VLOpFrag)), 875 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 876 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 877 (ivti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, TA_MA)>; 878 } 879} 880 881// Narrowing converting 882 883multiclass VPatNConvertFP2IVL_V<SDNode vop, string instruction_name> { 884 // Reuse the same list of types used in the widening nodes, but just swap 
the 885 // direction of types around so we're converting from Wti -> Vti 886 foreach vtiToWti = AllWidenableIntToFloatVectors in { 887 defvar vti = vtiToWti.Vti; 888 defvar fwti = vtiToWti.Wti; 889 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 890 (fwti.Mask V0), 891 VLOpFrag)), 892 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 893 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 894 (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 895 } 896} 897 898multiclass VPatNConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> { 899 foreach vtiToWti = AllWidenableIntToFloatVectors in { 900 defvar vti = vtiToWti.Vti; 901 defvar fwti = vtiToWti.Wti; 902 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 903 (fwti.Mask V0), (XLenVT timm:$frm), 904 VLOpFrag)), 905 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 906 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 907 (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>; 908 } 909} 910 911multiclass VPatNConvertI2FPVL_V<SDNode vop, string instruction_name> { 912 foreach fvtiToFWti = AllWidenableFloatVectors in { 913 defvar fvti = fvtiToFWti.Vti; 914 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 915 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 916 (iwti.Mask V0), 917 VLOpFrag)), 918 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 919 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 920 (iwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 921 } 922} 923 924multiclass VPatNConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 925 foreach fvtiToFWti = AllWidenableFloatVectors in { 926 defvar fvti = fvtiToFWti.Vti; 927 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 928 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 929 (iwti.Mask V0), (XLenVT timm:$frm), 930 VLOpFrag)), 931 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 932 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 933 (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 934 } 935} 936 937multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> { 938 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 939 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 940 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2, 941 (vti.Mask true_mask), 942 VLOpFrag)), 943 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX) 944 (vti_m1.Vector VR:$merge), 945 (vti.Vector vti.RegClass:$rs1), 946 (vti_m1.Vector VR:$rs2), 947 GPR:$vl, vti.Log2SEW)>; 948 949 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2, 950 (vti.Mask V0), VLOpFrag)), 951 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK") 952 (vti_m1.Vector VR:$merge), 953 (vti.Vector vti.RegClass:$rs1), 954 (vti_m1.Vector VR:$rs2), 955 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 956 } 957} 958 959multiclass VPatBinaryExtVL_WV_WX<SDNode op, PatFrags extop, string instruction_name> { 960 foreach vtiToWti = AllWidenableIntVectors in { 961 defvar vti = vtiToWti.Vti; 962 defvar wti = vtiToWti.Wti; 963 def : Pat< 964 (vti.Vector 965 (riscv_trunc_vector_vl 966 (op (wti.Vector wti.RegClass:$rs2), 967 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))), 968 (vti.Mask true_mask), 969 VLOpFrag)), 970 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX) 971 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>; 972 def : Pat< 973 (vti.Vector 974 
(riscv_trunc_vector_vl 975 (op (wti.Vector wti.RegClass:$rs2), 976 (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))), 977 (vti.Mask true_mask), 978 VLOpFrag)), 979 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 980 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>; 981 } 982} 983 984multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> { 985 defm : VPatBinaryExtVL_WV_WX<op, sext_oneuse, instruction_name>; 986 defm : VPatBinaryExtVL_WV_WX<op, zext_oneuse, instruction_name>; 987 foreach vtiToWti = AllWidenableIntVectors in { 988 defvar vti = vtiToWti.Vti; 989 defvar wti = vtiToWti.Wti; 990 def : Pat< 991 (vti.Vector 992 (riscv_trunc_vector_vl 993 (op (wti.Vector wti.RegClass:$rs2), 994 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), 995 VLOpFrag)), 996 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 997 wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW)>; 998 } 999} 1000 1001multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1002 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1003 defvar vti = vtiToWti.Vti; 1004 defvar wti = vtiToWti.Wti; 1005 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1006 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1007 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1008 VR:$rs2, (vti.Mask true_mask), VLOpFrag)), 1009 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX) 1010 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1011 (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>; 1012 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1013 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1014 VR:$rs2, (vti.Mask V0), VLOpFrag)), 1015 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK") 1016 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1017 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1018 } 1019} 1020 1021multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1022 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1023 defvar vti = vtiToWti.Vti; 1024 defvar wti = vtiToWti.Wti; 1025 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1026 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1027 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1028 VR:$rs2, (vti.Mask true_mask), VLOpFrag)), 1029 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX) 1030 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1031 (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>; 1032 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1033 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1034 VR:$rs2, (vti.Mask V0), VLOpFrag)), 1035 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK") 1036 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1037 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1038 } 1039} 1040 1041multiclass VPatWidenBinaryFPVL_VV_VF<SDNode op, PatFrags extop, string instruction_name> { 1042 foreach fvtiToFWti = AllWidenableFloatVectors in { 1043 defvar fvti = fvtiToFWti.Vti; 1044 defvar fwti = fvtiToFWti.Wti; 1045 def : Pat<(fwti.Vector (op (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs2), 1046 (fvti.Mask true_mask), VLOpFrag)), 1047 (fwti.Vector (extop (fvti.Vector 
fvti.RegClass:$rs1), 1048 (fvti.Mask true_mask), VLOpFrag)), 1049 srcvalue, (fwti.Mask true_mask), VLOpFrag)), 1050 (!cast<Instruction>(instruction_name#"_VV_"#fvti.LMul.MX) 1051 fvti.RegClass:$rs2, fvti.RegClass:$rs1, 1052 GPR:$vl, fvti.Log2SEW)>; 1053 def : Pat<(fwti.Vector (op (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs2), 1054 (fvti.Mask true_mask), VLOpFrag)), 1055 (fwti.Vector (extop (fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)), 1056 (fvti.Mask true_mask), VLOpFrag)), 1057 srcvalue, (fwti.Mask true_mask), VLOpFrag)), 1058 (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 1059 fvti.RegClass:$rs2, fvti.ScalarRegClass:$rs1, 1060 GPR:$vl, fvti.Log2SEW)>; 1061 } 1062} 1063 1064multiclass VPatWidenBinaryFPVL_WV_WF<SDNode op, PatFrags extop, string instruction_name> { 1065 foreach fvtiToFWti = AllWidenableFloatVectors in { 1066 defvar fvti = fvtiToFWti.Vti; 1067 defvar fwti = fvtiToFWti.Wti; 1068 def : Pat<(fwti.Vector (op (fwti.Vector fwti.RegClass:$rs2), 1069 (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs1), 1070 (fvti.Mask true_mask), VLOpFrag)), 1071 srcvalue, (fwti.Mask true_mask), VLOpFrag)), 1072 (!cast<Instruction>(instruction_name#"_WV_"#fvti.LMul.MX#"_TIED") 1073 fwti.RegClass:$rs2, fvti.RegClass:$rs1, 1074 GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1075 def : Pat<(fwti.Vector (op (fwti.Vector fwti.RegClass:$rs2), 1076 (fwti.Vector (extop (fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)), 1077 (fvti.Mask true_mask), VLOpFrag)), 1078 srcvalue, (fwti.Mask true_mask), VLOpFrag)), 1079 (!cast<Instruction>(instruction_name#"_W"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 1080 fwti.RegClass:$rs2, fvti.ScalarRegClass:$rs1, 1081 GPR:$vl, fvti.Log2SEW)>; 1082 } 1083} 1084 1085multiclass VPatWidenBinaryFPVL_VV_VF_WV_WF<SDNode op, string instruction_name> { 1086 defm : VPatWidenBinaryFPVL_VV_VF<op, riscv_fpextend_vl_oneuse, instruction_name>; 1087 defm : VPatWidenBinaryFPVL_WV_WF<op, riscv_fpextend_vl_oneuse, instruction_name>; 1088} 1089 1090multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> { 1091 foreach vtiToWti = AllWidenableIntVectors in { 1092 defvar vti = vtiToWti.Vti; 1093 defvar wti = vtiToWti.Wti; 1094 def : Pat< 1095 (vti.Vector 1096 (riscv_trunc_vector_vl 1097 (op (wti.Vector wti.RegClass:$rs2), 1098 (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1)), 1099 (vti.Mask true_mask), VLOpFrag)), 1100 srcvalue, (wti.Mask true_mask), VLOpFrag), 1101 (vti.Mask true_mask), VLOpFrag)), 1102 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1103 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>; 1104 } 1105} 1106 1107multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> { 1108 foreach vti = AllIntegerVectors in { 1109 defvar suffix = vti.LMul.MX; 1110 // NOTE: We choose VMADD because it has the most commuting freedom. So it 1111 // works best with how TwoAddressInstructionPass tries commuting. 1112 def : Pat<(vti.Vector 1113 (op vti.RegClass:$rs2, 1114 (riscv_mul_vl_oneuse vti.RegClass:$rs1, 1115 vti.RegClass:$rd, 1116 srcvalue, (vti.Mask true_mask), VLOpFrag), 1117 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1118 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 1119 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1120 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1121 // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally 1122 // commutable. 
1123 def : Pat<(vti.Vector 1124 (op vti.RegClass:$rs2, 1125 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), 1126 vti.RegClass:$rd, 1127 srcvalue, (vti.Mask true_mask), VLOpFrag), 1128 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1129 (!cast<Instruction>(instruction_name#"_VX_" # suffix) 1130 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1131 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1132 } 1133} 1134 1135multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> { 1136 foreach vti = AllIntegerVectors in { 1137 defvar suffix = vti.LMul.MX; 1138 def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask), 1139 (vti.Vector (op vti.RegClass:$rd, 1140 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1141 srcvalue, (vti.Mask true_mask), VLOpFrag), 1142 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1143 vti.RegClass:$rd, VLOpFrag), 1144 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 1145 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1146 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1147 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1148 (vti.Vector (op vti.RegClass:$rd, 1149 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1150 srcvalue, (vti.Mask true_mask), VLOpFrag), 1151 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1152 vti.RegClass:$rd, VLOpFrag), 1153 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1154 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1155 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1156 def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask), 1157 (vti.Vector (op vti.RegClass:$rd, 1158 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1159 srcvalue, (vti.Mask true_mask), VLOpFrag), 1160 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1161 vti.RegClass:$rd, VLOpFrag), 1162 (!cast<Instruction>(instruction_name#"_VX_"# suffix) 1163 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1164 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1165 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1166 (vti.Vector (op vti.RegClass:$rd, 1167 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1168 srcvalue, (vti.Mask true_mask), VLOpFrag), 1169 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1170 vti.RegClass:$rd, VLOpFrag), 1171 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1172 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1173 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1174 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1175 (vti.Vector (op vti.RegClass:$rd, 1176 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1177 srcvalue, (vti.Mask true_mask), VLOpFrag), 1178 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1179 vti.RegClass:$rd, VLOpFrag), 1180 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1181 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1182 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1183 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1184 (vti.Vector (op vti.RegClass:$rd, 1185 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1186 srcvalue, (vti.Mask true_mask), VLOpFrag), 1187 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1188 vti.RegClass:$rd, VLOpFrag), 1189 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1190 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1191 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1192 } 1193} 1194 1195multiclass VPatWidenMultiplyAddVL_VV_VX<PatFrag op1, string 
instruction_name> { 1196 foreach vtiTowti = AllWidenableIntVectors in { 1197 defvar vti = vtiTowti.Vti; 1198 defvar wti = vtiTowti.Wti; 1199 def : Pat<(wti.Vector 1200 (riscv_add_vl wti.RegClass:$rd, 1201 (op1 vti.RegClass:$rs1, 1202 (vti.Vector vti.RegClass:$rs2), 1203 srcvalue, (vti.Mask true_mask), VLOpFrag), 1204 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1205 (!cast<Instruction>(instruction_name#"_VV_" # vti.LMul.MX) 1206 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1207 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1208 def : Pat<(wti.Vector 1209 (riscv_add_vl wti.RegClass:$rd, 1210 (op1 (SplatPat XLenVT:$rs1), 1211 (vti.Vector vti.RegClass:$rs2), 1212 srcvalue, (vti.Mask true_mask), VLOpFrag), 1213 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1214 (!cast<Instruction>(instruction_name#"_VX_" # vti.LMul.MX) 1215 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1216 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1217 } 1218} 1219 1220multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> { 1221 foreach vtiTowti = AllWidenableIntVectors in { 1222 defvar vti = vtiTowti.Vti; 1223 defvar wti = vtiTowti.Wti; 1224 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1225 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2), 1226 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1227 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1228 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 1229 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1230 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2), 1231 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1232 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1233 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW)>; 1234 } 1235} 1236 1237multiclass VPatFPMulAddVL_VV_VF<SDNode vop, string instruction_name> { 1238 foreach vti = AllFloatVectors in { 1239 defvar suffix = vti.LMul.MX; 1240 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1241 vti.RegClass:$rs2, (vti.Mask true_mask), 1242 VLOpFrag)), 1243 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 1244 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1245 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1246 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1247 vti.RegClass:$rs2, (vti.Mask V0), 1248 VLOpFrag)), 1249 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1250 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1251 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1252 1253 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1254 vti.RegClass:$rd, vti.RegClass:$rs2, 1255 (vti.Mask true_mask), 1256 VLOpFrag)), 1257 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix) 1258 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1259 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1260 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1261 vti.RegClass:$rd, vti.RegClass:$rs2, 1262 (vti.Mask V0), 1263 VLOpFrag)), 1264 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1265 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1266 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1267 } 1268} 1269 1270multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> { 1271 foreach vti = AllFloatVectors in { 1272 defvar suffix = vti.LMul.MX; 1273 def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask), 1274 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1275 vti.RegClass:$rd, 
(vti.Mask true_mask), VLOpFrag)), 1276 vti.RegClass:$rd, VLOpFrag), 1277 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 1278 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1279 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1280 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1281 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1282 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1283 vti.RegClass:$rd, VLOpFrag), 1284 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1285 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1286 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1287 def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask), 1288 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1289 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1290 vti.RegClass:$rd, VLOpFrag), 1291 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix) 1292 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1293 GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1294 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1295 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1296 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1297 vti.RegClass:$rd, VLOpFrag), 1298 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1299 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1300 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; 1301 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1302 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1303 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1304 vti.RegClass:$rd, VLOpFrag), 1305 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1306 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1307 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1308 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1309 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1310 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1311 vti.RegClass:$rd, VLOpFrag), 1312 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1313 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1314 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1315 } 1316} 1317 1318multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> { 1319 foreach vtiToWti = AllWidenableFloatVectors in { 1320 defvar vti = vtiToWti.Vti; 1321 defvar wti = vtiToWti.Wti; 1322 def : Pat<(vop 1323 (wti.Vector (riscv_fpextend_vl_oneuse 1324 (vti.Vector vti.RegClass:$rs1), 1325 (vti.Mask true_mask), VLOpFrag)), 1326 (wti.Vector (riscv_fpextend_vl_oneuse 1327 (vti.Vector vti.RegClass:$rs2), 1328 (vti.Mask true_mask), VLOpFrag)), 1329 (wti.Vector wti.RegClass:$rd), (vti.Mask true_mask), 1330 VLOpFrag), 1331 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 1332 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1333 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1334 def : Pat<(vop 1335 (wti.Vector (riscv_fpextend_vl_oneuse 1336 (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 1337 (vti.Mask true_mask), VLOpFrag)), 1338 (wti.Vector (riscv_fpextend_vl_oneuse 1339 (vti.Vector vti.RegClass:$rs2), 1340 (vti.Mask true_mask), VLOpFrag)), 1341 (wti.Vector wti.RegClass:$rd), (vti.Mask true_mask), 1342 VLOpFrag), 1343 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 1344 wti.RegClass:$rd, 
vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1345 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1346 } 1347} 1348 1349//===----------------------------------------------------------------------===// 1350// Patterns. 1351//===----------------------------------------------------------------------===// 1352 1353let Predicates = [HasVInstructions] in { 1354 1355// 11. Vector Integer Arithmetic Instructions 1356 1357// 11.1. Vector Single-Width Integer Add and Subtract 1358defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">; 1359defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">; 1360// Handle VRSUB specially since it's the only integer binary op with reversed 1361// pattern operands 1362foreach vti = AllIntegerVectors in { 1363 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 1364 (vti.Vector vti.RegClass:$rs1), 1365 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 1366 (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK") 1367 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2, 1368 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1369 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)), 1370 (vti.Vector vti.RegClass:$rs1), 1371 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 1372 (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK") 1373 vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2, 1374 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1375} 1376 1377// 11.2. Vector Widening Integer Add/Subtract 1378defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">; 1379defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">; 1380defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">; 1381defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">; 1382 1383// 11.3. Vector Integer Extension 1384defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2", 1385 AllFractionableVF2IntVectors>; 1386defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2", 1387 AllFractionableVF2IntVectors>; 1388defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4", 1389 AllFractionableVF4IntVectors>; 1390defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4", 1391 AllFractionableVF4IntVectors>; 1392defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8", 1393 AllFractionableVF8IntVectors>; 1394defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8", 1395 AllFractionableVF8IntVectors>; 1396 1397// 11.5. Vector Bitwise Logical Instructions 1398defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">; 1399defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">; 1400defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">; 1401 1402// 11.6. Vector Single-Width Bit Shift Instructions 1403defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>; 1404defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>; 1405defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>; 1406 1407foreach vti = AllIntegerVectors in { 1408 // Emit shift by 1 as an add since it might be faster. 1409 def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1), 1410 (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), 1411 srcvalue, (vti.Mask true_mask), VLOpFrag), 1412 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 1413 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>; 1414} 1415 1416// 11.7. 
// 11.7. Vector Narrowing Integer Right Shift Instructions
defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">;
defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">;

defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">;
defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">;

defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">;

foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                               (vti.Mask V0),
                                               VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}

// 11.8. Vector Integer Comparison Instructions
foreach vti = AllIntegerVectors in {
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;

  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
  // There is no VMSGE(U)_VX instruction

  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;

  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT,
                                              SplatPat_simm5_plus1_nonzero>;
  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT,
                                              SplatPat_simm5_plus1_nonzero>;
  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE,
                                              SplatPat_simm5_plus1>;
  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE,
                                              SplatPat_simm5_plus1_nonzero>;
} // foreach vti = AllIntegerVectors

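// Note: the *_VIPlus1_* patterns above rely on the identity x < c <=> x <= c-1
// (and x >= c <=> x > c-1); the SplatPat_simm5_plus1(_nonzero) fragments
// restrict the matched constant so that c-1 is still encodable and the
// rewrite cannot wrap around.
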
// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH">;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU">;

// 11.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU">;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">;

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">;
defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">;
defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">;
defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmul_vl_oneuse, "PseudoVWMACC">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmulu_vl_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmulsu_vl_oneuse, "PseudoVWMACCSU">;
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(wti.Vector
              (riscv_add_vl wti.RegClass:$rd,
                            (riscv_vwmulsu_vl_oneuse (vti.Vector vti.RegClass:$rs1),
                                                     (SplatPat XLenVT:$rs2),
                                                     srcvalue,
                                                     (vti.Mask true_mask),
                                                     VLOpFrag),
                            srcvalue, (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCUS_VX_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

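// Note: the riscv_*_oneuse fragments used above only fold a multiply into a
// multiply-add when the multiply has no other users; otherwise the product
// would still have to be materialized separately.
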
// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          vti.RegClass:$rs1,
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                 GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (SplatPat XLenVT:$rs1),
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (SplatPat_simm5 simm5:$rs1),
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
                                           vti.RegClass:$rs1,
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX#"_TU")
                 vti.RegClass:$rs2, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
                                           (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX#"_TU")
                 vti.RegClass:$rs2, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
                                           (SplatPat_simm5 simm5:$rs1),
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX#"_TU")
                 vti.RegClass:$rs2, vti.RegClass:$rs2, simm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}

// 11.16. Vector Integer Move Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), GPR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
                 $rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, GPR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX#"_TU")
                 $passthru, $rs2, GPR:$vl, vti.Log2SEW)>;
  defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), (ImmPat XLenVT:$imm5),
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
                 XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, (ImmPat XLenVT:$imm5),
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX#"_TU")
                 $passthru, XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;

} // Predicates = [HasVInstructions]

// 13. Vector Floating-Point Instructions

let Predicates = [HasVInstructionsAnyF] in {

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fadd_vl, "PseudoVFADD">;
defm : VPatBinaryFPVL_VV_VF<riscv_fsub_vl, "PseudoVFSUB">;
defm : VPatBinaryFPVL_R_VF<riscv_fsub_vl, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fadd_vl, "PseudoVFWADD">;
defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fsub_vl, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fmul_vl, "PseudoVFMUL">;
defm : VPatBinaryFPVL_VV_VF<riscv_fdiv_vl, "PseudoVFDIV">;
defm : VPatBinaryFPVL_R_VF<riscv_fdiv_vl, "PseudoVFRDIV">;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPVL_VV_VF<riscv_fmul_vl, riscv_fpextend_vl_oneuse, "PseudoVFWMUL">;

// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
defm : VPatFPMulAddVL_VV_VF<riscv_vfmadd_vl, "PseudoVFMADD">;
defm : VPatFPMulAddVL_VV_VF<riscv_vfmsub_vl, "PseudoVFMSUB">;
defm : VPatFPMulAddVL_VV_VF<riscv_vfnmadd_vl, "PseudoVFNMADD">;
defm : VPatFPMulAddVL_VV_VF<riscv_vfnmsub_vl, "PseudoVFNMSUB">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">;

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfmadd_vl, "PseudoVFWMACC">;
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfnmadd_vl, "PseudoVFWNMACC">;
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfmsub_vl, "PseudoVFWMSAC">;
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfnmsub_vl, "PseudoVFWNMSAC">;

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fminnum_vl, "PseudoVFMIN">;
defm : VPatBinaryFPVL_VV_VF<riscv_fmaxnum_vl, "PseudoVFMAX">;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCVL_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

defm : VPatFPSetCCVL_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;

defm : VPatFPSetCCVL_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;

defm : VPatFPSetCCVL_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

foreach vti = AllFloatVectors in {
  // 13.8. Vector Floating-Point Square-Root Instruction
  def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
                            VLOpFrag),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX #"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

  // 13.12. Vector Floating-Point Sign-Injection Instructions
  def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                           VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                 TA_MA)>;
  // Handle fneg with VFSGNJN using the same input for both operands.
  def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                           VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                 TA_MA)>;

  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (vti.Vector vti.RegClass:$rs2),
                                vti.RegClass:$merge,
                                (vti.Mask V0),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs1,
                 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                 TAIL_AGNOSTIC)>;

  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (riscv_fneg_vl vti.RegClass:$rs2,
                                               (vti.Mask true_mask),
                                               VLOpFrag),
                                srcvalue,
                                (vti.Mask true_mask),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;

  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (SplatFPOp vti.ScalarRegClass:$rs2),
                                vti.RegClass:$merge,
                                (vti.Mask V0),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs1,
                 vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                 TAIL_AGNOSTIC)>;

  // Rounding without exception to implement nearbyint.
  def : Pat<(riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
                                       (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}

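// The sign-injection selections above use the standard RVV idioms: with the
// same source for both operands, "vfsgnjn.vv vd, vs, vs" negates and
// "vfsgnjx.vv vd, vs, vs" computes the absolute value, so no dedicated
// vfneg/vfabs instructions are required.
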
foreach fvti = AllFloatVectors in {
  // Floating-point vselects:
  // 11.15. Vector Integer Merge Instructions
  // 13.15. Vector Floating-Point Merge Instruction
  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           fvti.RegClass:$rs1,
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                 GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           (SplatFPOp fvti.ScalarRegClass:$rs1),
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2,
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
                 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           (SplatFPOp (fvti.Scalar fpimm0)),
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX#"_TU")
                 fvti.RegClass:$rs2, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                 GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX#"_TU")
                 fvti.RegClass:$rs2, fvti.RegClass:$rs2,
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
                 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU")
                 fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
                 GPR:$vl, fvti.Log2SEW)>;

  // 13.16. Vector Floating-Point Move Instruction
  // If we're splatting fpimm0, use vmv.v.x vd, x0.
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                             (fvti.Vector undef), (fvti.Scalar (fpimm0)), VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                 0, GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                             fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX#"_TU")
                 $passthru, 0, GPR:$vl, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                             (fvti.Vector undef), (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                fvti.LMul.MX)
                 (fvti.Scalar fvti.ScalarRegClass:$rs2),
                 GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                             fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                fvti.LMul.MX # "_TU")
                 $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
                 GPR:$vl, fvti.Log2SEW)>;

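  // Note: splatting +0.0 can use an integer move (vmv.v.i vd, 0 or
  // vmv.v.x vd, x0) because +0.0 is the all-zeros bit pattern, so no FP
  // register or FP move is needed for this case.
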
  // 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
  defm : VPatConvertFP2IVL_V<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">;
  defm : VPatConvertFP2IVL_V<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">;
  defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">;
  defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">;

  defm : VPatConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
  defm : VPatConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;

  defm : VPatConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
  defm : VPatConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;

  defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">;
  defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">;

  // 13.18. Widening Floating-Point/Integer Type-Convert Instructions
  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">;
  defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">;
  defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">;

  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;

  defm : VPatWConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
  defm : VPatWConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;

  defm : VPatWConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFWCVT_RM_F_XU_V">;
  defm : VPatWConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFWCVT_RM_F_X_V">;

  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1),
                                              (fvti.Mask V0),
                                              VLOpFrag)),
              (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
                   (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }

  // 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">;
  defm : VPatNConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">;
  defm : VPatNConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">;

  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;

  defm : VPatNConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
  defm : VPatNConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;

  defm : VPatNConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">;
  defm : VPatNConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">;

  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1),
                                             (fwti.Mask V0),
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                   (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;

    def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1),
                                               (fwti.Mask V0),
                                               VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                   (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

} // Predicates = [HasVInstructionsAnyF]

// 14. Vector Reduction Operations

// 14.1. Vector Single-Width Integer Reduction Instructions
let Predicates = [HasVInstructions] in {
defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", /*is_float*/0>;

// 14.2. Vector Widening Integer Reduction Instructions
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", /*is_float*/0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", /*is_float*/0>;
} // Predicates = [HasVInstructions]

// 14.3. Vector Single-Width Floating-Point Reduction Instructions
let Predicates = [HasVInstructionsAnyF] in {
defm : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", /*is_float*/1>;

// 14.4. Vector Widening Floating-Point Reduction Instructions
defm : VPatWidenReductionVL<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse, "PseudoVFWREDOSUM", /*is_float*/1>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_SEQ_FADD_vl, riscv_fpextend_vl_oneuse, "PseudoVFWREDOSUM", /*is_float*/1>;
defm : VPatWidenReductionVL<rvv_vecreduce_FADD_vl, fpext_oneuse, "PseudoVFWREDUSUM", /*is_float*/1>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_FADD_vl, riscv_fpextend_vl_oneuse, "PseudoVFWREDUSUM", /*is_float*/1>;
} // Predicates = [HasVInstructionsAnyF]

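// For reference, the reduction instructions selected above (e.g.
// vredsum.vs vd, vs2, vs1) combine element 0 of vs1 with the active elements
// of vs2 and write the scalar result to element 0 of vd.
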
// 15. Vector Mask Instructions

let Predicates = [HasVInstructions] in {

foreach mti = AllMasks in {
  // 15.1 Vector Mask-Register Logical Instructions
  def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
            (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
            (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                      (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                     (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                     VLOpFrag)),
            (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  // XOR is associative so we need 2 patterns for VMXNOR.
  def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                      VLOpFrag),
                                      VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                      VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                     VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                      VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  // Match the not idiom to the vmnot.m pseudo.
  def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                 VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

  // 15.2 Vector count population in mask vcpop.m
  def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
                 VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
                 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

  // 15.3 vfirst find-first-set mask bit
  def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                     VLOpFrag)),
            (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
                 VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                     VLOpFrag)),
            (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
                 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
}

} // Predicates = [HasVInstructions]

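// Note on the VMXNOR selections above: vmnot is xor with an all-ones mask, so
// ~(a ^ b) == (~a) ^ b == a ^ (~b); a complement folded into either operand
// of vmxor therefore still selects vmxnor.mm.
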
// 16. Vector Permutation Instructions

let Predicates = [HasVInstructions] in {
// 16.1. Integer Scalar Move Instructions
// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                          vti.ScalarRegClass:$rs1,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                 vti.RegClass:$merge,
                 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                              vti.RegClass:$rs1,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                              uimm5:$imm,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  // emul = lmul * 16 / sew
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;

    def : Pat<(vti.Vector
                (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                          (ivti.Vector ivti.RegClass:$rs1),
                                          vti.RegClass:$merge,
                                          (vti.Mask V0),
                                          VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

} // Predicates = [HasVInstructions]

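// Worked example for the EMUL computation above (emul = lmul * 16 / sew,
// tracked as 8*EMUL to keep it integral): for SEW=64 at LMUL=1,
// octuple_emul = (8 * 16) >> 6 = 2, i.e. the i16 index vector for
// vrgatherei16.vv uses EMUL=1/4.
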
let Predicates = [HasVInstructionsAnyF] in {

// 16.2. Floating-Point Scalar Move Instructions
foreach vti = AllFloatVectors in {
  def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                           (vti.Scalar (fpimm0)),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                 vti.RegClass:$merge, X0, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                           vti.ScalarRegClass:$rs1,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$merge,
                 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  defvar ivti = GetIntVTypeInfo<vti>.Vti;

  def : Pat<(vti.Vector
              (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                    (ivti.Vector vti.RegClass:$rs1),
                                    vti.RegClass:$merge,
                                    (vti.Mask V0),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector
              (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                    uimm5:$imm,
                                    vti.RegClass:$merge,
                                    (vti.Mask V0),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;

    def : Pat<(vti.Vector
                (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                          (ivti.Vector ivti.RegClass:$rs1),
                                          vti.RegClass:$merge,
                                          (vti.Mask V0),
                                          VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;

def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>; 2136def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>; 2137 2138let Predicates = [HasVInstructions] in { 2139 2140foreach vti = AllIntegerVectors in { 2141 def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask), 2142 VLOpFrag)), 2143 (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>; 2144 2145 def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef), 2146 (vti.Vector vti.RegClass:$rs1), 2147 GPR:$rs2, (vti.Mask true_mask), 2148 VLOpFrag)), 2149 (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) 2150 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 2151 def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd), 2152 (vti.Vector vti.RegClass:$rs1), 2153 GPR:$rs2, (vti.Mask true_mask), 2154 VLOpFrag)), 2155 (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU") 2156 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 2157 def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef), 2158 (vti.Vector vti.RegClass:$rs1), 2159 GPR:$rs2, (vti.Mask true_mask), 2160 VLOpFrag)), 2161 (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) 2162 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 2163 def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd), 2164 (vti.Vector vti.RegClass:$rs1), 2165 GPR:$rs2, (vti.Mask true_mask), 2166 VLOpFrag)), 2167 (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU") 2168 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; 2169} 2170 2171foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in { 2172 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), 2173 (vti.Vector vti.RegClass:$rs1), 2174 uimm5:$rs2, (vti.Mask true_mask), 2175 VLOpFrag, (XLenVT timm:$policy))), 2176 (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX) 2177 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, 2178 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2179 2180 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), 2181 (vti.Vector vti.RegClass:$rs1), 2182 GPR:$rs2, (vti.Mask true_mask), 2183 VLOpFrag, (XLenVT timm:$policy))), 2184 (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX) 2185 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 2186 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2187 2188 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), 2189 (vti.Vector vti.RegClass:$rs1), 2190 uimm5:$rs2, (vti.Mask true_mask), 2191 VLOpFrag, (XLenVT timm:$policy))), 2192 (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX) 2193 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, 2194 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2195 2196 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), 2197 (vti.Vector vti.RegClass:$rs1), 2198 GPR:$rs2, (vti.Mask true_mask), 2199 VLOpFrag, (XLenVT timm:$policy))), 2200 (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX) 2201 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 2202 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2203} 2204 2205} // Predicates = [HasVInstructions] 2206