1//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file contains the required infrastructure and VL patterns to 10/// support code generation for the standard 'V' (Vector) extension, version 11/// version 1.0. 12/// 13/// This file is included from and depends upon RISCVInstrInfoVPseudos.td 14/// 15/// Note: the patterns for RVV intrinsics are found in 16/// RISCVInstrInfoVPseudos.td. 17/// 18//===----------------------------------------------------------------------===// 19 20//===----------------------------------------------------------------------===// 21// Helpers to define the VL patterns. 22//===----------------------------------------------------------------------===// 23 24def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, 25 SDTCisSameAs<0, 2>, 26 SDTCisVec<0>, SDTCisInt<0>, 27 SDTCVecEltisVT<3, i1>, 28 SDTCisSameNumEltsAs<0, 3>, 29 SDTCisVT<4, XLenVT>]>; 30 31def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 32 SDTCisSameAs<0, 2>, 33 SDTCisVec<0>, SDTCisInt<0>, 34 SDTCisSameAs<0, 3>, 35 SDTCVecEltisVT<4, i1>, 36 SDTCisSameNumEltsAs<0, 4>, 37 SDTCisVT<5, XLenVT>]>; 38 39def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 40 SDTCisVec<0>, SDTCisFP<0>, 41 SDTCVecEltisVT<2, i1>, 42 SDTCisSameNumEltsAs<0, 2>, 43 SDTCisVT<3, XLenVT>]>; 44def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 45 SDTCisSameAs<0, 2>, 46 SDTCisVec<0>, SDTCisFP<0>, 47 SDTCisSameAs<0, 3>, 48 SDTCVecEltisVT<4, i1>, 49 SDTCisSameNumEltsAs<0, 4>, 50 SDTCisVT<5, XLenVT>]>; 51 52def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 53 SDTCisSameAs<0, 2>, 54 SDTCisVec<0>, SDTCisFP<0>, 55 SDTCisSameAs<0, 3>, 56 SDTCVecEltisVT<4, i1>, 57 SDTCisSameNumEltsAs<0, 4>, 58 SDTCisVT<5, XLenVT>]>; 59 60def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL", 61 SDTypeProfile<1, 3, [SDTCisVec<0>, 62 SDTCisSameAs<0, 1>, 63 SDTCisSameAs<0, 2>, 64 SDTCisVT<3, XLenVT>]>>; 65def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL", 66 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, 67 SDTCisSameAs<0, 1>, 68 SDTCisVT<2, XLenVT>, 69 SDTCisVT<3, XLenVT>]>>; 70def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL", 71 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, 72 SDTCisSameAs<0, 1>, 73 SDTCisEltOfVec<2, 0>, 74 SDTCisVT<3, XLenVT>]>>; 75def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL", 76 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 77 SDTCisInt<0>, 78 SDTCisVT<2, XLenVT>, 79 SDTCisVT<3, XLenVT>]>>; 80def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL", 81 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 82 SDTCisFP<0>, 83 SDTCisEltOfVec<2, 0>, 84 SDTCisVT<3, XLenVT>]>>; 85 86def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 87def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>; 88def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 89def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 90def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 91def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 92def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 93def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 94def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>; 95def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>; 96def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>; 97def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>; 98def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>; 99def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>; 100def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>; 101def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 102def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 103def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 104def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 105 106def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>; 107def riscv_bswap_vl : SDNode<"RISCVISD::BSWAP_VL", SDT_RISCVIntUnOp_VL>; 108def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>; 109def riscv_cttz_vl : SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>; 110def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>; 111 112def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 113def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 114def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; 115def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>; 116 117def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 118def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>; 119def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 120def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>; 121def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>; 122def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>; 123def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>; 124def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; 125def riscv_fminnum_vl : SDNode<"RISCVISD::FMINNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 126def riscv_fmaxnum_vl : SDNode<"RISCVISD::FMAXNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 127 128def riscv_strict_fadd_vl : SDNode<"RISCVISD::STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; 129def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; 130def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; 131def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; 132def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; 133 134def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 135 [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 136 (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 137def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 138 [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 139 (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 140def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 141 [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 142 (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 143def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 144 [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 145 (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 146def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 147 [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl), 148 (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>; 149 150def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL", 151 SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>, 152 SDTCisFP<1>, SDTCisVec<1>, 153 SDTCisSameSizeAs<0, 1>, 154 SDTCisSameNumEltsAs<0, 1>, 155 SDTCVecEltisVT<2, i1>, 156 SDTCisSameNumEltsAs<0, 2>, 157 SDTCisVT<3, XLenVT>]>>; 158 159def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 160 SDTCisSameAs<0, 2>, 161 SDTCisSameAs<0, 3>, 162 SDTCisVec<0>, SDTCisFP<0>, 163 SDTCVecEltisVT<4, i1>, 164 SDTCisSameNumEltsAs<0, 4>, 165 SDTCisVT<5, XLenVT>]>; 166def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 167def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 168def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 169def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 170 171def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 172 SDTCisVec<1>, SDTCisFP<1>, 173 SDTCisOpSmallerThanOp<1, 0>, 174 SDTCisSameNumEltsAs<0, 1>, 175 SDTCisSameAs<1, 2>, 176 SDTCisSameAs<0, 3>, 177 SDTCVecEltisVT<4, i1>, 178 SDTCisSameNumEltsAs<0, 4>, 179 SDTCisVT<5, XLenVT>]>; 180def riscv_vfwmadd_vl : SDNode<"RISCVISD::VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 181def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 182def riscv_vfwmsub_vl : SDNode<"RISCVISD::VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 183def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 184 185def riscv_strict_vfmadd_vl : SDNode<"RISCVISD::STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 186def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 187def riscv_strict_vfmsub_vl : SDNode<"RISCVISD::STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 188def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 189 190def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 191 [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 192 (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 193def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 194 [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 195 (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 196def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 197 [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 198 (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 199def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 200 [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 201 (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 202 203def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [ 204 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>, 205 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 206]>; 207def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [ 208 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, 209 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 210]>; 211 212def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; 213def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; 214def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; 215def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>; 216def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; 217def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; 218 219def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 220 [(riscv_fpround_vl node:$src, node:$mask, node:$vl), 221 (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>; 222def any_riscv_fpextend_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 223 [(riscv_fpextend_vl node:$src, node:$mask, node:$vl), 224 (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>; 225def any_riscv_fncvt_rod_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 226 [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl), 227 (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>; 228 229def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [ 230 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 231 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 232]>; 233def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [ 234 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 235 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 236 SDTCisVT<4, XLenVT> // Rounding mode 237]>; 238 239def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [ 240 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 241 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 242]>; 243def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [ 244 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 245 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 246 SDTCisVT<4, XLenVT> // Rounding mode 247]>; 248 249def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [ 250 SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, 251 SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>, 252 SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>; 253 254// Float -> Int 255def riscv_vfcvt_xu_f_vl : SDNode<"RISCVISD::VFCVT_XU_F_VL", SDT_RISCVFP2IOp_VL>; 256def riscv_vfcvt_x_f_vl : SDNode<"RISCVISD::VFCVT_X_F_VL", SDT_RISCVFP2IOp_VL>; 257def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>; 258def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>; 259 260def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>; 261def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>; 262 263def riscv_strict_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>; 264def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; 265def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; 266 267def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm), 268 [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm), 269 (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>; 270def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 271 [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl), 272 (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>; 273def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 274 [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl), 275 (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>; 276 277// Int -> Float 278def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 279def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 280def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>; 281def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>; 282 283def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; 284def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; 285 286def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 287 [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl), 288 (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>; 289def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 290 [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl), 291 (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>; 292 293def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>; 294def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; 295 296def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 297 [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl), 298 (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>; 299 300def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>; 301def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; 302def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; 303def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 304 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 305 (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>; 306def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 307 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 308 (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>; 309 310def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL", 311 SDTypeProfile<1, 5, [SDTCisVec<0>, 312 SDTCisSameAs<0, 1>, 313 SDTCisVT<2, XLenVT>, 314 SDTCisSameAs<0, 3>, 315 SDTCVecEltisVT<4, i1>, 316 SDTCisSameNumEltsAs<0, 4>, 317 SDTCisVT<5, XLenVT>]>>; 318def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL", 319 SDTypeProfile<1, 5, [SDTCisVec<0>, 320 SDTCisSameAs<0, 1>, 321 SDTCisInt<2>, 322 SDTCisSameNumEltsAs<0, 2>, 323 SDTCisSameSizeAs<0, 2>, 324 SDTCisSameAs<0, 3>, 325 SDTCVecEltisVT<4, i1>, 326 SDTCisSameNumEltsAs<0, 4>, 327 SDTCisVT<5, XLenVT>]>>; 328def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL", 329 SDTypeProfile<1, 5, [SDTCisVec<0>, 330 SDTCisSameAs<0, 1>, 331 SDTCisInt<2>, 332 SDTCVecEltisVT<2, i16>, 333 SDTCisSameNumEltsAs<0, 2>, 334 SDTCisSameAs<0, 3>, 335 SDTCVecEltisVT<4, i1>, 336 SDTCisSameNumEltsAs<0, 4>, 337 SDTCisVT<5, XLenVT>]>>; 338 339def SDT_RISCVSelect_VL : SDTypeProfile<1, 4, [ 340 SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>, 341 SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisVT<4, XLenVT> 342]>; 343 344def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL", SDT_RISCVSelect_VL>; 345def riscv_vp_merge_vl : SDNode<"RISCVISD::VP_MERGE_VL", SDT_RISCVSelect_VL>; 346 347def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, 348 SDTCisVT<1, XLenVT>]>; 349def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>; 350def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>; 351 352def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 353 SDTCisSameAs<0, 2>, 354 SDTCVecEltisVT<0, i1>, 355 SDTCisVT<3, XLenVT>]>; 356def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 357def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 358def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 359 360def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>; 361 362def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), 363 (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; 364 365def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL", 366 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 367 SDTCisVec<1>, SDTCisInt<1>, 368 SDTCVecEltisVT<2, i1>, 369 SDTCisSameNumEltsAs<1, 2>, 370 SDTCisVT<3, XLenVT>]>>; 371 372def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL", 373 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 374 SDTCisVec<1>, SDTCisInt<1>, 375 SDTCVecEltisVT<2, i1>, 376 SDTCisSameNumEltsAs<1, 2>, 377 SDTCisVT<3, XLenVT>]>>; 378 379def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, 380 SDTCisSameNumEltsAs<0, 1>, 381 SDTCisSameNumEltsAs<1, 2>, 382 SDTCVecEltisVT<2, i1>, 383 SDTCisVT<3, XLenVT>]>; 384def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>; 385def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>; 386 387def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL", 388 SDTypeProfile<1, 3, [SDTCisVec<0>, 389 SDTCisSameNumEltsAs<0, 1>, 390 SDTCisSameNumEltsAs<0, 2>, 391 SDTCVecEltisVT<2, i1>, 392 SDTCisVT<3, XLenVT>]>>; 393 394def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 395 SDTCisInt<1>, 396 SDTCisSameNumEltsAs<0, 1>, 397 SDTCisOpSmallerThanOp<1, 0>, 398 SDTCisSameAs<1, 2>, 399 SDTCisSameAs<0, 3>, 400 SDTCisSameNumEltsAs<1, 4>, 401 SDTCVecEltisVT<4, i1>, 402 SDTCisVT<5, XLenVT>]>; 403def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 404def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 405def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>; 406def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 407def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 408def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWIntBinOp_VL, []>; 409def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWIntBinOp_VL, []>; 410 411def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 412 SDTCisInt<1>, 413 SDTCisSameNumEltsAs<0, 1>, 414 SDTCisOpSmallerThanOp<1, 0>, 415 SDTCisSameAs<1, 2>, 416 SDTCisSameAs<0, 3>, 417 SDTCisSameNumEltsAs<1, 4>, 418 SDTCVecEltisVT<4, i1>, 419 SDTCisVT<5, XLenVT>]>; 420def riscv_vwmacc_vl : SDNode<"RISCVISD::VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; 421def riscv_vwmaccu_vl : SDNode<"RISCVISD::VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; 422def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>; 423 424def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 425 SDTCisFP<1>, 426 SDTCisSameNumEltsAs<0, 1>, 427 SDTCisOpSmallerThanOp<1, 0>, 428 SDTCisSameAs<1, 2>, 429 SDTCisSameAs<0, 3>, 430 SDTCisSameNumEltsAs<1, 4>, 431 SDTCVecEltisVT<4, i1>, 432 SDTCisVT<5, XLenVT>]>; 433def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; 434def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; 435def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>; 436 437def SDT_RISCVVNIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 438 SDTCisInt<1>, 439 SDTCisSameNumEltsAs<0, 1>, 440 SDTCisOpSmallerThanOp<0, 1>, 441 SDTCisSameAs<0, 2>, 442 SDTCisSameAs<0, 3>, 443 SDTCisSameNumEltsAs<0, 4>, 444 SDTCVecEltisVT<4, i1>, 445 SDTCisVT<5, XLenVT>]>; 446def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNIntBinOp_VL>; 447 448def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 449 SDTCisSameAs<0, 1>, 450 SDTCisInt<2>, 451 SDTCisSameNumEltsAs<1, 2>, 452 SDTCisOpSmallerThanOp<2, 1>, 453 SDTCisSameAs<0, 3>, 454 SDTCisSameNumEltsAs<1, 4>, 455 SDTCVecEltisVT<4, i1>, 456 SDTCisVT<5, XLenVT>]>; 457def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>; 458def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>; 459def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>; 460def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>; 461 462def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 463 SDTCisSameAs<0, 1>, 464 SDTCisFP<2>, 465 SDTCisSameNumEltsAs<1, 2>, 466 SDTCisOpSmallerThanOp<2, 1>, 467 SDTCisSameAs<0, 3>, 468 SDTCisSameNumEltsAs<1, 4>, 469 SDTCVecEltisVT<4, i1>, 470 SDTCisVT<5, XLenVT>]>; 471 472def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>; 473def riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>; 474 475def SDTRVVVecReduce : SDTypeProfile<1, 6, [ 476 SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>, 477 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>, 478 SDTCisVT<6, XLenVT> 479]>; 480 481def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 482 node:$E), 483 (riscv_add_vl node:$A, node:$B, node:$C, 484 node:$D, node:$E), [{ 485 return N->hasOneUse(); 486}]>; 487 488def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 489 node:$E), 490 (riscv_sub_vl node:$A, node:$B, node:$C, 491 node:$D, node:$E), [{ 492 return N->hasOneUse(); 493}]>; 494 495def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 496 node:$E), 497 (riscv_mul_vl node:$A, node:$B, node:$C, 498 node:$D, node:$E), [{ 499 return N->hasOneUse(); 500}]>; 501 502def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 503 node:$E), 504 (riscv_vwmul_vl node:$A, node:$B, node:$C, 505 node:$D, node:$E), [{ 506 return N->hasOneUse(); 507}]>; 508 509def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 510 node:$E), 511 (riscv_vwmulu_vl node:$A, node:$B, node:$C, 512 node:$D, node:$E), [{ 513 return N->hasOneUse(); 514}]>; 515 516def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 517 node:$E), 518 (riscv_vwmulsu_vl node:$A, node:$B, node:$C, 519 node:$D, node:$E), [{ 520 return N->hasOneUse(); 521}]>; 522 523def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 524 (riscv_sext_vl node:$A, node:$B, node:$C), [{ 525 return N->hasOneUse(); 526}]>; 527 528def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 529 (riscv_zext_vl node:$A, node:$B, node:$C), [{ 530 return N->hasOneUse(); 531}]>; 532 533def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 534 (riscv_fpextend_vl node:$A, node:$B, node:$C), [{ 535 return N->hasOneUse(); 536}]>; 537 538def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 539 node:$E), 540 (riscv_vfmadd_vl node:$A, node:$B, 541 node:$C, node:$D, node:$E), [{ 542 return N->hasOneUse(); 543}]>; 544 545def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 546 node:$E), 547 (riscv_vfnmadd_vl node:$A, node:$B, 548 node:$C, node:$D, node:$E), [{ 549 return N->hasOneUse(); 550}]>; 551 552def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 553 node:$E), 554 (riscv_vfmsub_vl node:$A, node:$B, 555 node:$C, node:$D, node:$E), [{ 556 return N->hasOneUse(); 557}]>; 558 559def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 560 node:$E), 561 (riscv_vfnmsub_vl node:$A, node:$B, 562 node:$C, node:$D, node:$E), [{ 563 return N->hasOneUse(); 564}]>; 565 566foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR", 567 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in 568 def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>; 569 570// Give explicit Complexity to prefer simm5/uimm5. 571def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>; 572def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>; 573def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>; 574def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>; 575def SplatPat_simm5_plus1 576 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>; 577def SplatPat_simm5_plus1_nonzero 578 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>; 579 580def ext_oneuse_SplatPat 581 : ComplexPattern<vAny, 1, "selectExtOneUseVSplat", [], [], 2>; 582 583def SelectFPImm : ComplexPattern<fAny, 1, "selectFPImm", [], [], 1>; 584 585// Ignore the vl operand. 586def SplatFPOp : PatFrag<(ops node:$op), 587 (riscv_vfmv_v_f_vl undef, node:$op, srcvalue)>; 588 589def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>; 590def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>; 591def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>; 592def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>; 593 594class VPatBinaryVL_V<SDPatternOperator vop, 595 string instruction_name, 596 string suffix, 597 ValueType result_type, 598 ValueType op1_type, 599 ValueType op2_type, 600 ValueType mask_type, 601 int log2sew, 602 LMULInfo vlmul, 603 VReg result_reg_class, 604 VReg op1_reg_class, 605 VReg op2_reg_class, 606 bit isSEWAware = 0> 607 : Pat<(result_type (vop 608 (op1_type op1_reg_class:$rs1), 609 (op2_type op2_reg_class:$rs2), 610 (result_type result_reg_class:$merge), 611 (mask_type V0), 612 VLOpFrag)), 613 (!cast<Instruction>( 614 !if(isSEWAware, 615 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 616 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) 617 result_reg_class:$merge, 618 op1_reg_class:$rs1, 619 op2_reg_class:$rs2, 620 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 621 622class VPatBinaryVL_V_RM<SDPatternOperator vop, 623 string instruction_name, 624 string suffix, 625 ValueType result_type, 626 ValueType op1_type, 627 ValueType op2_type, 628 ValueType mask_type, 629 int log2sew, 630 LMULInfo vlmul, 631 VReg result_reg_class, 632 VReg op1_reg_class, 633 VReg op2_reg_class, 634 bit isSEWAware = 0> 635 : Pat<(result_type (vop 636 (op1_type op1_reg_class:$rs1), 637 (op2_type op2_reg_class:$rs2), 638 (result_type result_reg_class:$merge), 639 (mask_type V0), 640 VLOpFrag)), 641 (!cast<Instruction>( 642 !if(isSEWAware, 643 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 644 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) 645 result_reg_class:$merge, 646 op1_reg_class:$rs1, 647 op2_reg_class:$rs2, 648 (mask_type V0), 649 // Value to indicate no rounding mode change in 650 // RISCVInsertReadWriteCSR 651 FRM_DYN, 652 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 653 654multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop, 655 string instruction_name, 656 string suffix, 657 ValueType result_type, 658 ValueType op2_type, 659 int sew, 660 LMULInfo vlmul, 661 VReg result_reg_class, 662 VReg op2_reg_class> { 663 def : Pat<(result_type (vop 664 (result_type result_reg_class:$rs1), 665 (op2_type op2_reg_class:$rs2), 666 srcvalue, 667 true_mask, 668 VLOpFrag)), 669 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 670 result_reg_class:$rs1, 671 op2_reg_class:$rs2, 672 GPR:$vl, sew, TAIL_AGNOSTIC)>; 673 // Tail undisturbed 674 def : Pat<(riscv_vp_merge_vl true_mask, 675 (result_type (vop 676 result_reg_class:$rs1, 677 (op2_type op2_reg_class:$rs2), 678 srcvalue, 679 true_mask, 680 VLOpFrag)), 681 result_reg_class:$rs1, VLOpFrag), 682 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 683 result_reg_class:$rs1, 684 op2_reg_class:$rs2, 685 GPR:$vl, sew, TU_MU)>; 686} 687 688multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop, 689 string instruction_name, 690 string suffix, 691 ValueType result_type, 692 ValueType op2_type, 693 int sew, 694 LMULInfo vlmul, 695 VReg result_reg_class, 696 VReg op2_reg_class> { 697 def : Pat<(result_type (vop 698 (result_type result_reg_class:$rs1), 699 (op2_type op2_reg_class:$rs2), 700 srcvalue, 701 true_mask, 702 VLOpFrag)), 703 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 704 result_reg_class:$rs1, 705 op2_reg_class:$rs2, 706 // Value to indicate no rounding mode change in 707 // RISCVInsertReadWriteCSR 708 FRM_DYN, 709 GPR:$vl, sew, TAIL_AGNOSTIC)>; 710 // Tail undisturbed 711 def : Pat<(riscv_vp_merge_vl true_mask, 712 (result_type (vop 713 result_reg_class:$rs1, 714 (op2_type op2_reg_class:$rs2), 715 srcvalue, 716 true_mask, 717 VLOpFrag)), 718 result_reg_class:$rs1, VLOpFrag), 719 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 720 result_reg_class:$rs1, 721 op2_reg_class:$rs2, 722 // Value to indicate no rounding mode change in 723 // RISCVInsertReadWriteCSR 724 FRM_DYN, 725 GPR:$vl, sew, TU_MU)>; 726} 727 728class VPatBinaryVL_XI<SDPatternOperator vop, 729 string instruction_name, 730 string suffix, 731 ValueType result_type, 732 ValueType vop1_type, 733 ValueType vop2_type, 734 ValueType mask_type, 735 int log2sew, 736 LMULInfo vlmul, 737 VReg result_reg_class, 738 VReg vop_reg_class, 739 ComplexPattern SplatPatKind, 740 DAGOperand xop_kind, 741 bit isSEWAware = 0> 742 : Pat<(result_type (vop 743 (vop1_type vop_reg_class:$rs1), 744 (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), 745 (result_type result_reg_class:$merge), 746 (mask_type V0), 747 VLOpFrag)), 748 (!cast<Instruction>( 749 !if(isSEWAware, 750 instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 751 instruction_name#_#suffix#_#vlmul.MX#"_MASK")) 752 result_reg_class:$merge, 753 vop_reg_class:$rs1, 754 xop_kind:$rs2, 755 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 756 757multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name, 758 list<VTypeInfo> vtilist = AllIntegerVectors, 759 bit isSEWAware = 0> { 760 foreach vti = vtilist in { 761 let Predicates = GetVTypePredicates<vti>.Predicates in { 762 def : VPatBinaryVL_V<vop, instruction_name, "VV", 763 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 764 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 765 vti.RegClass, isSEWAware>; 766 def : VPatBinaryVL_XI<vop, instruction_name, "VX", 767 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 768 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 769 SplatPat, GPR, isSEWAware>; 770 } 771 } 772} 773 774multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name, 775 Operand ImmType = simm5> 776 : VPatBinaryVL_VV_VX<vop, instruction_name> { 777 foreach vti = AllIntegerVectors in { 778 let Predicates = GetVTypePredicates<vti>.Predicates in 779 def : VPatBinaryVL_XI<vop, instruction_name, "VI", 780 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 781 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 782 !cast<ComplexPattern>(SplatPat#_#ImmType), 783 ImmType>; 784 } 785} 786 787multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> { 788 foreach VtiToWti = AllWidenableIntVectors in { 789 defvar vti = VtiToWti.Vti; 790 defvar wti = VtiToWti.Wti; 791 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 792 GetVTypePredicates<wti>.Predicates) in { 793 def : VPatBinaryVL_V<vop, instruction_name, "VV", 794 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 795 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 796 vti.RegClass>; 797 def : VPatBinaryVL_XI<vop, instruction_name, "VX", 798 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 799 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 800 SplatPat, GPR>; 801 } 802 } 803} 804 805multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w, 806 string instruction_name> 807 : VPatBinaryWVL_VV_VX<vop, instruction_name> { 808 foreach VtiToWti = AllWidenableIntVectors in { 809 defvar vti = VtiToWti.Vti; 810 defvar wti = VtiToWti.Wti; 811 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 812 GetVTypePredicates<wti>.Predicates) in { 813 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 814 wti.Vector, vti.Vector, vti.Log2SEW, 815 vti.LMul, wti.RegClass, vti.RegClass>; 816 def : VPatBinaryVL_V<vop_w, instruction_name, "WV", 817 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 818 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 819 vti.RegClass>; 820 def : VPatBinaryVL_XI<vop_w, instruction_name, "WX", 821 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 822 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 823 SplatPat, GPR>; 824 } 825 } 826} 827 828multiclass VPatBinaryNVL_WV_WX_WI<SDPatternOperator vop, string instruction_name> { 829 foreach VtiToWti = AllWidenableIntVectors in { 830 defvar vti = VtiToWti.Vti; 831 defvar wti = VtiToWti.Wti; 832 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 833 GetVTypePredicates<wti>.Predicates) in { 834 def : VPatBinaryVL_V<vop, instruction_name, "WV", 835 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 836 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 837 vti.RegClass>; 838 def : VPatBinaryVL_XI<vop, instruction_name, "WX", 839 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 840 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 841 SplatPat, GPR>; 842 def : VPatBinaryVL_XI<vop, instruction_name, "WI", 843 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 844 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 845 !cast<ComplexPattern>(SplatPat#_#uimm5), 846 uimm5>; 847 } 848 } 849} 850 851class VPatBinaryVL_VF<SDPatternOperator vop, 852 string instruction_name, 853 ValueType result_type, 854 ValueType vop1_type, 855 ValueType vop2_type, 856 ValueType mask_type, 857 int log2sew, 858 LMULInfo vlmul, 859 VReg result_reg_class, 860 VReg vop_reg_class, 861 RegisterClass scalar_reg_class, 862 bit isSEWAware = 0> 863 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 864 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 865 (result_type result_reg_class:$merge), 866 (mask_type V0), 867 VLOpFrag)), 868 (!cast<Instruction>( 869 !if(isSEWAware, 870 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 871 instruction_name#"_"#vlmul.MX#"_MASK")) 872 result_reg_class:$merge, 873 vop_reg_class:$rs1, 874 scalar_reg_class:$rs2, 875 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 876 877class VPatBinaryVL_VF_RM<SDPatternOperator vop, 878 string instruction_name, 879 ValueType result_type, 880 ValueType vop1_type, 881 ValueType vop2_type, 882 ValueType mask_type, 883 int log2sew, 884 LMULInfo vlmul, 885 VReg result_reg_class, 886 VReg vop_reg_class, 887 RegisterClass scalar_reg_class, 888 bit isSEWAware = 0> 889 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 890 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 891 (result_type result_reg_class:$merge), 892 (mask_type V0), 893 VLOpFrag)), 894 (!cast<Instruction>( 895 !if(isSEWAware, 896 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 897 instruction_name#"_"#vlmul.MX#"_MASK")) 898 result_reg_class:$merge, 899 vop_reg_class:$rs1, 900 scalar_reg_class:$rs2, 901 (mask_type V0), 902 // Value to indicate no rounding mode change in 903 // RISCVInsertReadWriteCSR 904 FRM_DYN, 905 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 906 907multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name, 908 bit isSEWAware = 0> { 909 foreach vti = AllFloatVectors in { 910 let Predicates = GetVTypePredicates<vti>.Predicates in { 911 def : VPatBinaryVL_V<vop, instruction_name, "VV", 912 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 913 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 914 vti.RegClass, isSEWAware>; 915 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 916 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 917 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 918 vti.ScalarRegClass, isSEWAware>; 919 } 920 } 921} 922 923multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name, 924 bit isSEWAware = 0> { 925 foreach vti = AllFloatVectors in { 926 let Predicates = GetVTypePredicates<vti>.Predicates in { 927 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 928 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 929 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 930 vti.RegClass, isSEWAware>; 931 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 932 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 933 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 934 vti.ScalarRegClass, isSEWAware>; 935 } 936 } 937} 938 939multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name, 940 bit isSEWAware = 0> { 941 foreach fvti = AllFloatVectors in { 942 let Predicates = GetVTypePredicates<fvti>.Predicates in 943 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 944 fvti.RegClass:$rs1, 945 (fvti.Vector fvti.RegClass:$merge), 946 (fvti.Mask V0), 947 VLOpFrag)), 948 (!cast<Instruction>( 949 !if(isSEWAware, 950 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 951 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 952 fvti.RegClass:$merge, 953 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 954 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 955 } 956} 957 958multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name, 959 bit isSEWAware = 0> { 960 foreach fvti = AllFloatVectors in { 961 let Predicates = GetVTypePredicates<fvti>.Predicates in 962 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 963 fvti.RegClass:$rs1, 964 (fvti.Vector fvti.RegClass:$merge), 965 (fvti.Mask V0), 966 VLOpFrag)), 967 (!cast<Instruction>( 968 !if(isSEWAware, 969 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 970 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 971 fvti.RegClass:$merge, 972 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 973 (fvti.Mask V0), 974 // Value to indicate no rounding mode change in 975 // RISCVInsertReadWriteCSR 976 FRM_DYN, 977 GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 978 } 979} 980 981multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name, 982 CondCode cc> { 983 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 984 vti.RegClass:$rs2, cc, 985 VR:$merge, 986 (vti.Mask V0), 987 VLOpFrag)), 988 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 989 VR:$merge, 990 vti.RegClass:$rs1, 991 vti.RegClass:$rs2, 992 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 993} 994 995// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 996multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name, 997 CondCode cc, CondCode invcc> 998 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> { 999 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), 1000 vti.RegClass:$rs1, invcc, 1001 VR:$merge, 1002 (vti.Mask V0), 1003 VLOpFrag)), 1004 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1005 VR:$merge, vti.RegClass:$rs1, 1006 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1007} 1008 1009multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name, 1010 CondCode cc, CondCode invcc> { 1011 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); 1012 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1013 (SplatPat (XLenVT GPR:$rs2)), cc, 1014 VR:$merge, 1015 (vti.Mask V0), 1016 VLOpFrag)), 1017 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1018 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1019 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), 1020 (vti.Vector vti.RegClass:$rs1), invcc, 1021 VR:$merge, 1022 (vti.Mask V0), 1023 VLOpFrag)), 1024 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1025 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1026} 1027 1028multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name, 1029 CondCode cc, CondCode invcc> { 1030 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1031 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1032 (SplatPat_simm5 simm5:$rs2), cc, 1033 VR:$merge, 1034 (vti.Mask V0), 1035 VLOpFrag)), 1036 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1037 XLenVT:$rs2, (vti.Mask V0), GPR:$vl, 1038 vti.Log2SEW)>; 1039 1040 // FIXME: Can do some canonicalization to remove these patterns. 1041 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2), 1042 (vti.Vector vti.RegClass:$rs1), invcc, 1043 VR:$merge, 1044 (vti.Mask V0), 1045 VLOpFrag)), 1046 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1047 simm5:$rs2, (vti.Mask V0), GPR:$vl, 1048 vti.Log2SEW)>; 1049} 1050 1051multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti, 1052 string instruction_name, 1053 CondCode cc, CondCode invcc, 1054 ComplexPattern splatpat_kind> { 1055 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1056 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1057 (splatpat_kind simm5:$rs2), cc, 1058 VR:$merge, 1059 (vti.Mask V0), 1060 VLOpFrag)), 1061 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1062 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1063 vti.Log2SEW)>; 1064 1065 // FIXME: Can do some canonicalization to remove these patterns. 1066 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), 1067 (vti.Vector vti.RegClass:$rs1), invcc, 1068 VR:$merge, 1069 (vti.Mask V0), 1070 VLOpFrag)), 1071 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1072 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1073 vti.Log2SEW)>; 1074} 1075 1076multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc, 1077 string inst_name, 1078 string swapped_op_inst_name> { 1079 foreach fvti = AllFloatVectors in { 1080 let Predicates = GetVTypePredicates<fvti>.Predicates in { 1081 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1082 fvti.RegClass:$rs2, 1083 cc, 1084 VR:$merge, 1085 (fvti.Mask V0), 1086 VLOpFrag)), 1087 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") 1088 VR:$merge, fvti.RegClass:$rs1, 1089 fvti.RegClass:$rs2, (fvti.Mask V0), 1090 GPR:$vl, fvti.Log2SEW)>; 1091 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1092 (SplatFPOp fvti.ScalarRegClass:$rs2), 1093 cc, 1094 VR:$merge, 1095 (fvti.Mask V0), 1096 VLOpFrag)), 1097 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1098 VR:$merge, fvti.RegClass:$rs1, 1099 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1100 GPR:$vl, fvti.Log2SEW)>; 1101 def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 1102 (fvti.Vector fvti.RegClass:$rs1), 1103 cc, 1104 VR:$merge, 1105 (fvti.Mask V0), 1106 VLOpFrag)), 1107 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1108 VR:$merge, fvti.RegClass:$rs1, 1109 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1110 GPR:$vl, fvti.Log2SEW)>; 1111 } 1112 } 1113} 1114 1115multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix, 1116 list <VTypeInfoToFraction> fraction_list> { 1117 foreach vtiTofti = fraction_list in { 1118 defvar vti = vtiTofti.Vti; 1119 defvar fti = vtiTofti.Fti; 1120 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1121 GetVTypePredicates<fti>.Predicates) in 1122 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), 1123 (fti.Mask V0), VLOpFrag)), 1124 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") 1125 (vti.Vector (IMPLICIT_DEF)), 1126 fti.RegClass:$rs2, 1127 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1128 } 1129} 1130 1131// Single width converting 1132 1133multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1134 foreach fvti = AllFloatVectors in { 1135 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1136 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1137 GetVTypePredicates<ivti>.Predicates) in 1138 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1139 (fvti.Mask V0), 1140 VLOpFrag)), 1141 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1142 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1143 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>; 1144 } 1145} 1146 1147multiclass VPatConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1148 foreach fvti = AllFloatVectors in { 1149 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1150 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1151 GetVTypePredicates<ivti>.Predicates) in 1152 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1153 (fvti.Mask V0), 1154 VLOpFrag)), 1155 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1156 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1157 (fvti.Mask V0), 1158 // Value to indicate no rounding mode change in 1159 // RISCVInsertReadWriteCSR 1160 FRM_DYN, 1161 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1162 } 1163} 1164 1165 1166multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_name> { 1167 foreach fvti = AllFloatVectors in { 1168 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1169 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1170 GetVTypePredicates<ivti>.Predicates) in 1171 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1172 (fvti.Mask V0), (XLenVT timm:$frm), 1173 VLOpFrag)), 1174 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1175 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1176 (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, 1177 TA_MA)>; 1178 } 1179} 1180 1181multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name> { 1182 foreach fvti = AllFloatVectors in { 1183 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1184 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1185 GetVTypePredicates<ivti>.Predicates) in 1186 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1187 (ivti.Mask V0), 1188 VLOpFrag)), 1189 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1190 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1191 (ivti.Mask V0), 1192 // Value to indicate no rounding mode change in 1193 // RISCVInsertReadWriteCSR 1194 FRM_DYN, 1195 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1196 } 1197} 1198 1199multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 1200 foreach fvti = AllFloatVectors in { 1201 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1202 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1203 GetVTypePredicates<ivti>.Predicates) in 1204 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1205 (ivti.Mask V0), (XLenVT timm:$frm), 1206 VLOpFrag)), 1207 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1208 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1209 (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1210 } 1211} 1212 1213// Widening converting 1214 1215multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1216 foreach fvtiToFWti = AllWidenableFloatVectors in { 1217 defvar fvti = fvtiToFWti.Vti; 1218 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1219 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1220 GetVTypePredicates<iwti>.Predicates) in 1221 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1222 (fvti.Mask V0), 1223 VLOpFrag)), 1224 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1225 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1226 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 1227 } 1228} 1229 1230multiclass VPatWConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1231 foreach fvtiToFWti = AllWidenableFloatVectors in { 1232 defvar fvti = fvtiToFWti.Vti; 1233 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1234 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1235 GetVTypePredicates<iwti>.Predicates) in 1236 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1237 (fvti.Mask V0), 1238 VLOpFrag)), 1239 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1240 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1241 (fvti.Mask V0), 1242 // Value to indicate no rounding mode change in 1243 // RISCVInsertReadWriteCSR 1244 FRM_DYN, 1245 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1246 } 1247} 1248 1249 1250multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> { 1251 foreach fvtiToFWti = AllWidenableFloatVectors in { 1252 defvar fvti = fvtiToFWti.Vti; 1253 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1254 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1255 GetVTypePredicates<iwti>.Predicates) in 1256 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1257 (fvti.Mask V0), (XLenVT timm:$frm), 1258 VLOpFrag)), 1259 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1260 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1261 (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1262 } 1263} 1264 1265multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop, 1266 string instruction_name> { 1267 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1268 defvar ivti = vtiToWti.Vti; 1269 defvar fwti = vtiToWti.Wti; 1270 let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates, 1271 GetVTypePredicates<fwti>.Predicates) in 1272 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1273 (ivti.Mask V0), 1274 VLOpFrag)), 1275 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1276 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1277 (ivti.Mask V0), 1278 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1279 } 1280} 1281 1282// Narrowing converting 1283 1284multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop, 1285 string instruction_name> { 1286 // Reuse the same list of types used in the widening nodes, but just swap the 1287 // direction of types around so we're converting from Wti -> Vti 1288 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1289 defvar vti = vtiToWti.Vti; 1290 defvar fwti = vtiToWti.Wti; 1291 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1292 GetVTypePredicates<fwti>.Predicates) in 1293 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1294 (fwti.Mask V0), 1295 VLOpFrag)), 1296 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1297 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1298 (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1299 } 1300} 1301 1302multiclass VPatNConvertFP2IVL_W_RM<SDPatternOperator vop, 1303 string instruction_name> { 1304 // Reuse the same list of types used in the widening nodes, but just swap the 1305 // direction of types around so we're converting from Wti -> Vti 1306 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1307 defvar vti = vtiToWti.Vti; 1308 defvar fwti = vtiToWti.Wti; 1309 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1310 GetVTypePredicates<fwti>.Predicates) in 1311 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1312 (fwti.Mask V0), 1313 VLOpFrag)), 1314 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1315 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1316 (fwti.Mask V0), 1317 // Value to indicate no rounding mode change in 1318 // RISCVInsertReadWriteCSR 1319 FRM_DYN, 1320 GPR:$vl, vti.Log2SEW, TA_MA)>; 1321 } 1322} 1323 1324multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> { 1325 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1326 defvar vti = vtiToWti.Vti; 1327 defvar fwti = vtiToWti.Wti; 1328 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1329 GetVTypePredicates<fwti>.Predicates) in 1330 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1331 (fwti.Mask V0), (XLenVT timm:$frm), 1332 VLOpFrag)), 1333 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1334 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1335 (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>; 1336 } 1337} 1338 1339multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop, 1340 string instruction_name> { 1341 foreach fvtiToFWti = AllWidenableFloatVectors in { 1342 defvar fvti = fvtiToFWti.Vti; 1343 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1344 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1345 GetVTypePredicates<iwti>.Predicates) in 1346 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1347 (iwti.Mask V0), 1348 VLOpFrag)), 1349 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1350 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1351 (iwti.Mask V0), 1352 // Value to indicate no rounding mode change in 1353 // RISCVInsertReadWriteCSR 1354 FRM_DYN, 1355 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1356 } 1357} 1358 1359multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> { 1360 foreach fvtiToFWti = AllWidenableFloatVectors in { 1361 defvar fvti = fvtiToFWti.Vti; 1362 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1363 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1364 GetVTypePredicates<iwti>.Predicates) in 1365 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1366 (iwti.Mask V0), (XLenVT timm:$frm), 1367 VLOpFrag)), 1368 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1369 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1370 (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1371 } 1372} 1373 1374multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> { 1375 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1376 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1377 let Predicates = GetVTypePredicates<vti>.Predicates in { 1378 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1379 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1380 (vti.Mask true_mask), VLOpFrag, 1381 (XLenVT timm:$policy))), 1382 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) 1383 (vti_m1.Vector VR:$merge), 1384 (vti.Vector vti.RegClass:$rs1), 1385 (vti_m1.Vector VR:$rs2), 1386 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1387 1388 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1389 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1390 (vti.Mask V0), VLOpFrag, 1391 (XLenVT timm:$policy))), 1392 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1393 (vti_m1.Vector VR:$merge), 1394 (vti.Vector vti.RegClass:$rs1), 1395 (vti_m1.Vector VR:$rs2), 1396 (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1397 } 1398 } 1399} 1400 1401multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float> { 1402 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1403 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1404 let Predicates = GetVTypePredicates<vti>.Predicates in { 1405 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1406 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1407 (vti.Mask true_mask), VLOpFrag, 1408 (XLenVT timm:$policy))), 1409 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) 1410 (vti_m1.Vector VR:$merge), 1411 (vti.Vector vti.RegClass:$rs1), 1412 (vti_m1.Vector VR:$rs2), 1413 // Value to indicate no rounding mode change in 1414 // RISCVInsertReadWriteCSR 1415 FRM_DYN, 1416 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1417 1418 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1419 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1420 (vti.Mask V0), VLOpFrag, 1421 (XLenVT timm:$policy))), 1422 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1423 (vti_m1.Vector VR:$merge), 1424 (vti.Vector vti.RegClass:$rs1), 1425 (vti_m1.Vector VR:$rs2), 1426 (vti.Mask V0), 1427 // Value to indicate no rounding mode change in 1428 // RISCVInsertReadWriteCSR 1429 FRM_DYN, 1430 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1431 } 1432 } 1433} 1434 1435multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> { 1436 foreach vtiToWti = AllWidenableIntVectors in { 1437 defvar vti = vtiToWti.Vti; 1438 defvar wti = vtiToWti.Wti; 1439 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1440 GetVTypePredicates<wti>.Predicates) in { 1441 def : Pat< 1442 (vti.Vector 1443 (riscv_trunc_vector_vl 1444 (op (wti.Vector wti.RegClass:$rs2), 1445 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))), 1446 (vti.Mask true_mask), 1447 VLOpFrag)), 1448 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX) 1449 (vti.Vector (IMPLICIT_DEF)), 1450 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; 1451 1452 def : Pat< 1453 (vti.Vector 1454 (riscv_trunc_vector_vl 1455 (op (wti.Vector wti.RegClass:$rs2), 1456 (wti.Vector (ext_oneuse_SplatPat (XLenVT GPR:$rs1)))), 1457 (vti.Mask true_mask), 1458 VLOpFrag)), 1459 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1460 (vti.Vector (IMPLICIT_DEF)), 1461 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; 1462 1463 def : Pat< 1464 (vti.Vector 1465 (riscv_trunc_vector_vl 1466 (op (wti.Vector wti.RegClass:$rs2), 1467 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), 1468 VLOpFrag)), 1469 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1470 (vti.Vector (IMPLICIT_DEF)), 1471 wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; 1472 } 1473 } 1474} 1475 1476multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1477 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1478 defvar vti = vtiToWti.Vti; 1479 defvar wti = vtiToWti.Wti; 1480 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1481 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1482 GetVTypePredicates<wti>.Predicates) in { 1483 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1484 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1485 VR:$rs2, (vti.Mask true_mask), VLOpFrag, 1486 (XLenVT timm:$policy))), 1487 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) 1488 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1489 (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW, 1490 (XLenVT timm:$policy))>; 1491 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1492 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1493 VR:$rs2, (vti.Mask V0), VLOpFrag, 1494 (XLenVT timm:$policy))), 1495 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1496 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1497 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1498 (XLenVT timm:$policy))>; 1499 } 1500 } 1501} 1502 1503multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1504 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1505 defvar vti = vtiToWti.Vti; 1506 defvar wti = vtiToWti.Wti; 1507 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1508 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1509 GetVTypePredicates<wti>.Predicates) in { 1510 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1511 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1512 VR:$rs2, (vti.Mask true_mask), VLOpFrag, 1513 (XLenVT timm:$policy))), 1514 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) 1515 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1516 (wti_m1.Vector VR:$rs2), 1517 // Value to indicate no rounding mode change in 1518 // RISCVInsertReadWriteCSR 1519 FRM_DYN, 1520 GPR:$vl, vti.Log2SEW, 1521 (XLenVT timm:$policy))>; 1522 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1523 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1524 VR:$rs2, (vti.Mask V0), VLOpFrag, 1525 (XLenVT timm:$policy))), 1526 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1527 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1528 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1529 // Value to indicate no rounding mode change in 1530 // RISCVInsertReadWriteCSR 1531 FRM_DYN, 1532 GPR:$vl, vti.Log2SEW, 1533 (XLenVT timm:$policy))>; 1534 } 1535 } 1536} 1537 1538multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1539 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1540 defvar vti = vtiToWti.Vti; 1541 defvar wti = vtiToWti.Wti; 1542 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1543 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1544 GetVTypePredicates<wti>.Predicates) in { 1545 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1546 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1547 VR:$rs2, (vti.Mask true_mask), VLOpFrag, 1548 (XLenVT timm:$policy))), 1549 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) 1550 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1551 (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW, 1552 (XLenVT timm:$policy))>; 1553 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1554 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1555 VR:$rs2, (vti.Mask V0), VLOpFrag, 1556 (XLenVT timm:$policy))), 1557 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1558 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1559 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1560 (XLenVT timm:$policy))>; 1561 } 1562 } 1563} 1564 1565multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1566 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1567 defvar vti = vtiToWti.Vti; 1568 defvar wti = vtiToWti.Wti; 1569 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1570 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1571 GetVTypePredicates<wti>.Predicates) in { 1572 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1573 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1574 VR:$rs2, (vti.Mask true_mask), VLOpFrag, 1575 (XLenVT timm:$policy))), 1576 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) 1577 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1578 (wti_m1.Vector VR:$rs2), 1579 // Value to indicate no rounding mode change in 1580 // RISCVInsertReadWriteCSR 1581 FRM_DYN, 1582 GPR:$vl, vti.Log2SEW, 1583 (XLenVT timm:$policy))>; 1584 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1585 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1586 VR:$rs2, (vti.Mask V0), VLOpFrag, 1587 (XLenVT timm:$policy))), 1588 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1589 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1590 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1591 // Value to indicate no rounding mode change in 1592 // RISCVInsertReadWriteCSR 1593 FRM_DYN, 1594 GPR:$vl, vti.Log2SEW, 1595 (XLenVT timm:$policy))>; 1596 } 1597 } 1598} 1599 1600multiclass VPatBinaryFPWVL_VV_VF<SDNode vop, string instruction_name> { 1601 foreach fvtiToFWti = AllWidenableFloatVectors in { 1602 defvar vti = fvtiToFWti.Vti; 1603 defvar wti = fvtiToFWti.Wti; 1604 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1605 GetVTypePredicates<wti>.Predicates) in { 1606 def : VPatBinaryVL_V<vop, instruction_name, "VV", 1607 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1608 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1609 vti.RegClass>; 1610 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 1611 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1612 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1613 vti.ScalarRegClass>; 1614 } 1615 } 1616} 1617 1618multiclass VPatBinaryFPWVL_VV_VF_RM<SDNode vop, string instruction_name> { 1619 foreach fvtiToFWti = AllWidenableFloatVectors in { 1620 defvar vti = fvtiToFWti.Vti; 1621 defvar wti = fvtiToFWti.Wti; 1622 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1623 GetVTypePredicates<wti>.Predicates) in { 1624 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 1625 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1626 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1627 vti.RegClass>; 1628 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 1629 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1630 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1631 vti.ScalarRegClass>; 1632 } 1633 } 1634} 1635 1636multiclass VPatBinaryFPWVL_VV_VF_WV_WF<SDNode vop, SDNode vop_w, string instruction_name> 1637 : VPatBinaryFPWVL_VV_VF<vop, instruction_name> { 1638 foreach fvtiToFWti = AllWidenableFloatVectors in { 1639 defvar vti = fvtiToFWti.Vti; 1640 defvar wti = fvtiToFWti.Wti; 1641 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1642 GetVTypePredicates<wti>.Predicates) in { 1643 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 1644 wti.Vector, vti.Vector, vti.Log2SEW, 1645 vti.LMul, wti.RegClass, vti.RegClass>; 1646 def : VPatBinaryVL_V<vop_w, instruction_name, "WV", 1647 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1648 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1649 vti.RegClass>; 1650 def : VPatBinaryVL_VF<vop_w, instruction_name#"_W"#vti.ScalarSuffix, 1651 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1652 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1653 vti.ScalarRegClass>; 1654 } 1655 } 1656} 1657 1658multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM<SDNode vop, SDNode vop_w, string instruction_name> 1659 : VPatBinaryFPWVL_VV_VF_RM<vop, instruction_name> { 1660 foreach fvtiToFWti = AllWidenableFloatVectors in { 1661 defvar vti = fvtiToFWti.Vti; 1662 defvar wti = fvtiToFWti.Wti; 1663 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1664 GetVTypePredicates<wti>.Predicates) in { 1665 defm : VPatTiedBinaryNoMaskVL_V_RM<vop_w, instruction_name, "WV", 1666 wti.Vector, vti.Vector, vti.Log2SEW, 1667 vti.LMul, wti.RegClass, vti.RegClass>; 1668 def : VPatBinaryVL_V_RM<vop_w, instruction_name, "WV", 1669 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1670 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1671 vti.RegClass>; 1672 def : VPatBinaryVL_VF_RM<vop_w, instruction_name#"_W"#vti.ScalarSuffix, 1673 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1674 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1675 vti.ScalarRegClass>; 1676 } 1677 } 1678} 1679 1680multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> { 1681 foreach vtiToWti = AllWidenableIntVectors in { 1682 defvar vti = vtiToWti.Vti; 1683 defvar wti = vtiToWti.Wti; 1684 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1685 GetVTypePredicates<wti>.Predicates) in 1686 def : Pat< 1687 (vti.Vector 1688 (riscv_trunc_vector_vl 1689 (op (wti.Vector wti.RegClass:$rs2), 1690 (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))), 1691 (vti.Mask true_mask), VLOpFrag)), 1692 srcvalue, (wti.Mask true_mask), VLOpFrag), 1693 (vti.Mask true_mask), VLOpFrag)), 1694 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1695 (vti.Vector (IMPLICIT_DEF)), 1696 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; 1697 } 1698} 1699 1700multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction_name> { 1701 foreach vtiToWti = AllWidenableIntVectors in { 1702 defvar vti = vtiToWti.Vti; 1703 defvar wti = vtiToWti.Wti; 1704 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1705 GetVTypePredicates<wti>.Predicates) in 1706 def : Pat< 1707 (vti.Vector 1708 (riscv_trunc_vector_vl 1709 (op (wti.Vector wti.RegClass:$rs2), 1710 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), 1711 (vti.Mask true_mask), VLOpFrag)), 1712 srcvalue, (vti.Mask true_mask), VLOpFrag), 1713 (vti.Mask V0), VLOpFrag)), 1714 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK") 1715 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1, 1716 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1717 } 1718} 1719 1720multiclass VPatNarrowShiftVL_WV<SDNode op, string instruction_name> { 1721 defm : VPatNarrowShiftExtVL_WV<op, riscv_sext_vl_oneuse, instruction_name>; 1722 defm : VPatNarrowShiftExtVL_WV<op, riscv_zext_vl_oneuse, instruction_name>; 1723} 1724 1725multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> { 1726 foreach vti = AllIntegerVectors in { 1727 defvar suffix = vti.LMul.MX; 1728 let Predicates = GetVTypePredicates<vti>.Predicates in { 1729 // NOTE: We choose VMADD because it has the most commuting freedom. So it 1730 // works best with how TwoAddressInstructionPass tries commuting. 1731 def : Pat<(vti.Vector 1732 (op vti.RegClass:$rs2, 1733 (riscv_mul_vl_oneuse vti.RegClass:$rs1, 1734 vti.RegClass:$rd, 1735 srcvalue, (vti.Mask true_mask), VLOpFrag), 1736 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1737 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 1738 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1739 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1740 // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally 1741 // commutable. 1742 def : Pat<(vti.Vector 1743 (op vti.RegClass:$rs2, 1744 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), 1745 vti.RegClass:$rd, 1746 srcvalue, (vti.Mask true_mask), VLOpFrag), 1747 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1748 (!cast<Instruction>(instruction_name#"_VX_" # suffix) 1749 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1750 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1751 } 1752 } 1753} 1754 1755multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> { 1756 foreach vti = AllIntegerVectors in { 1757 defvar suffix = vti.LMul.MX; 1758 let Predicates = GetVTypePredicates<vti>.Predicates in { 1759 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1760 (vti.Vector (op vti.RegClass:$rd, 1761 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1762 srcvalue, (vti.Mask true_mask), VLOpFrag), 1763 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1764 vti.RegClass:$rd, VLOpFrag), 1765 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1766 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1767 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1768 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1769 (vti.Vector (op vti.RegClass:$rd, 1770 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1771 srcvalue, (vti.Mask true_mask), VLOpFrag), 1772 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1773 vti.RegClass:$rd, VLOpFrag), 1774 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1775 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1776 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1777 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1778 (vti.Vector (op vti.RegClass:$rd, 1779 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1780 srcvalue, (vti.Mask true_mask), VLOpFrag), 1781 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1782 vti.RegClass:$rd, VLOpFrag), 1783 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1784 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1785 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1786 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1787 (vti.Vector (op vti.RegClass:$rd, 1788 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1789 srcvalue, (vti.Mask true_mask), VLOpFrag), 1790 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1791 vti.RegClass:$rd, VLOpFrag), 1792 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1793 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1794 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1795 } 1796 } 1797} 1798 1799multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> { 1800 foreach vtiTowti = AllWidenableIntVectors in { 1801 defvar vti = vtiTowti.Vti; 1802 defvar wti = vtiTowti.Wti; 1803 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1804 GetVTypePredicates<wti>.Predicates) in { 1805 def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1), 1806 (vti.Vector vti.RegClass:$rs2), 1807 (wti.Vector wti.RegClass:$rd), 1808 (vti.Mask V0), VLOpFrag), 1809 (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK") 1810 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1811 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1812 def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1), 1813 (vti.Vector vti.RegClass:$rs2), 1814 (wti.Vector wti.RegClass:$rd), 1815 (vti.Mask V0), VLOpFrag), 1816 (!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK") 1817 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, 1818 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1819 TAIL_AGNOSTIC)>; 1820 } 1821 } 1822} 1823 1824multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> { 1825 foreach vtiTowti = AllWidenableIntVectors in { 1826 defvar vti = vtiTowti.Vti; 1827 defvar wti = vtiTowti.Wti; 1828 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1829 GetVTypePredicates<wti>.Predicates) in { 1830 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1831 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2), 1832 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1833 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1834 (vti.Vector (IMPLICIT_DEF)), 1835 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 1836 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1837 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2), 1838 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1839 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1840 (vti.Vector (IMPLICIT_DEF)), 1841 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 1842 } 1843 } 1844} 1845 1846multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name> { 1847 foreach vti = AllFloatVectors in { 1848 defvar suffix = vti.LMul.MX; 1849 let Predicates = GetVTypePredicates<vti>.Predicates in { 1850 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1851 vti.RegClass:$rs2, (vti.Mask V0), 1852 VLOpFrag)), 1853 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1854 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1855 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1856 1857 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1858 vti.RegClass:$rd, vti.RegClass:$rs2, 1859 (vti.Mask V0), 1860 VLOpFrag)), 1861 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1862 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1863 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1864 } 1865 } 1866} 1867 1868multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_name> { 1869 foreach vti = AllFloatVectors in { 1870 defvar suffix = vti.LMul.MX; 1871 let Predicates = GetVTypePredicates<vti>.Predicates in { 1872 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1873 vti.RegClass:$rs2, (vti.Mask V0), 1874 VLOpFrag)), 1875 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1876 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1877 (vti.Mask V0), 1878 // Value to indicate no rounding mode change in 1879 // RISCVInsertReadWriteCSR 1880 FRM_DYN, 1881 GPR:$vl, vti.Log2SEW, TA_MA)>; 1882 1883 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1884 vti.RegClass:$rd, vti.RegClass:$rs2, 1885 (vti.Mask V0), 1886 VLOpFrag)), 1887 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1888 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1889 (vti.Mask V0), 1890 // Value to indicate no rounding mode change in 1891 // RISCVInsertReadWriteCSR 1892 FRM_DYN, 1893 GPR:$vl, vti.Log2SEW, TA_MA)>; 1894 } 1895 } 1896} 1897 1898multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> { 1899 foreach vti = AllFloatVectors in { 1900 defvar suffix = vti.LMul.MX; 1901 let Predicates = GetVTypePredicates<vti>.Predicates in { 1902 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1903 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1904 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1905 vti.RegClass:$rd, VLOpFrag), 1906 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1907 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1908 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1909 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1910 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1911 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1912 vti.RegClass:$rd, VLOpFrag), 1913 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1914 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1915 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1916 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1917 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1918 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1919 vti.RegClass:$rd, VLOpFrag), 1920 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1921 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1922 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1923 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1924 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1925 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1926 vti.RegClass:$rd, VLOpFrag), 1927 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1928 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1929 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1930 } 1931 } 1932} 1933 1934multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> { 1935 foreach vti = AllFloatVectors in { 1936 defvar suffix = vti.LMul.MX; 1937 let Predicates = GetVTypePredicates<vti>.Predicates in { 1938 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1939 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1940 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1941 vti.RegClass:$rd, VLOpFrag), 1942 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1943 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1944 (vti.Mask V0), 1945 // Value to indicate no rounding mode change in 1946 // RISCVInsertReadWriteCSR 1947 FRM_DYN, 1948 GPR:$vl, vti.Log2SEW, TU_MU)>; 1949 def : Pat<(riscv_vp_merge_vl (vti.Mask V0), 1950 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1951 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1952 vti.RegClass:$rd, VLOpFrag), 1953 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1954 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1955 (vti.Mask V0), 1956 // Value to indicate no rounding mode change in 1957 // RISCVInsertReadWriteCSR 1958 FRM_DYN, 1959 GPR:$vl, vti.Log2SEW, TU_MU)>; 1960 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1961 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1962 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1963 vti.RegClass:$rd, VLOpFrag), 1964 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1965 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1966 (vti.Mask V0), 1967 // Value to indicate no rounding mode change in 1968 // RISCVInsertReadWriteCSR 1969 FRM_DYN, 1970 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1971 def : Pat<(riscv_vselect_vl (vti.Mask V0), 1972 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1973 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1974 vti.RegClass:$rd, VLOpFrag), 1975 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1976 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1977 (vti.Mask V0), 1978 // Value to indicate no rounding mode change in 1979 // RISCVInsertReadWriteCSR 1980 FRM_DYN, 1981 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1982 } 1983 } 1984} 1985 1986multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> { 1987 foreach vtiToWti = AllWidenableFloatVectors in { 1988 defvar vti = vtiToWti.Vti; 1989 defvar wti = vtiToWti.Wti; 1990 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1991 GetVTypePredicates<wti>.Predicates) in { 1992 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 1993 (vti.Vector vti.RegClass:$rs2), 1994 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 1995 VLOpFrag), 1996 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK") 1997 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1998 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1999 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 2000 (vti.Vector vti.RegClass:$rs2), 2001 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 2002 VLOpFrag), 2003 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK") 2004 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 2005 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2006 } 2007 } 2008} 2009 2010multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> { 2011 foreach vtiToWti = AllWidenableFloatVectors in { 2012 defvar vti = vtiToWti.Vti; 2013 defvar wti = vtiToWti.Wti; 2014 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2015 GetVTypePredicates<wti>.Predicates) in { 2016 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 2017 (vti.Vector vti.RegClass:$rs2), 2018 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 2019 VLOpFrag), 2020 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK") 2021 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 2022 (vti.Mask V0), 2023 // Value to indicate no rounding mode change in 2024 // RISCVInsertReadWriteCSR 2025 FRM_DYN, 2026 GPR:$vl, vti.Log2SEW, TA_MA)>; 2027 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 2028 (vti.Vector vti.RegClass:$rs2), 2029 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 2030 VLOpFrag), 2031 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK") 2032 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 2033 (vti.Mask V0), 2034 // Value to indicate no rounding mode change in 2035 // RISCVInsertReadWriteCSR 2036 FRM_DYN, 2037 GPR:$vl, vti.Log2SEW, TA_MA)>; 2038 } 2039 } 2040} 2041 2042//===----------------------------------------------------------------------===// 2043// Patterns. 2044//===----------------------------------------------------------------------===// 2045 2046// 11. Vector Integer Arithmetic Instructions 2047 2048// 11.1. Vector Single-Width Integer Add and Subtract 2049defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">; 2050defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">; 2051// Handle VRSUB specially since it's the only integer binary op with reversed 2052// pattern operands 2053foreach vti = AllIntegerVectors in { 2054 let Predicates = GetVTypePredicates<vti>.Predicates in { 2055 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 2056 (vti.Vector vti.RegClass:$rs1), 2057 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2058 (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK") 2059 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2, 2060 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2061 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)), 2062 (vti.Vector vti.RegClass:$rs1), 2063 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2064 (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK") 2065 vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2, 2066 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2067 } 2068} 2069 2070// 11.2. Vector Widening Integer Add/Subtract 2071defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">; 2072defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">; 2073defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">; 2074defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">; 2075 2076// shl_vl (ext_vl v, splat 1) is a special case of widening add. 2077foreach vtiToWti = AllWidenableIntVectors in { 2078 defvar vti = vtiToWti.Vti; 2079 defvar wti = vtiToWti.Wti; 2080 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2081 GetVTypePredicates<wti>.Predicates) in { 2082 def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse 2083 (vti.Vector vti.RegClass:$rs1), 2084 (vti.Mask V0), VLOpFrag)), 2085 (wti.Vector (riscv_vmv_v_x_vl 2086 (wti.Vector undef), 1, VLOpFrag)), 2087 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2088 (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK") 2089 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1, 2090 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2091 def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse 2092 (vti.Vector vti.RegClass:$rs1), 2093 (vti.Mask V0), VLOpFrag)), 2094 (wti.Vector (riscv_vmv_v_x_vl 2095 (wti.Vector undef), 1, VLOpFrag)), 2096 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2097 (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK") 2098 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1, 2099 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2100 } 2101} 2102 2103// 11.3. Vector Integer Extension 2104defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2", 2105 AllFractionableVF2IntVectors>; 2106defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2", 2107 AllFractionableVF2IntVectors>; 2108defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4", 2109 AllFractionableVF4IntVectors>; 2110defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4", 2111 AllFractionableVF4IntVectors>; 2112defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8", 2113 AllFractionableVF8IntVectors>; 2114defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8", 2115 AllFractionableVF8IntVectors>; 2116 2117// 11.5. Vector Bitwise Logical Instructions 2118defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">; 2119defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">; 2120defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">; 2121 2122// 11.6. Vector Single-Width Bit Shift Instructions 2123defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>; 2124defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>; 2125defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>; 2126 2127foreach vti = AllIntegerVectors in { 2128 // Emit shift by 1 as an add since it might be faster. 2129 let Predicates = GetVTypePredicates<vti>.Predicates in 2130 def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1), 2131 (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), 2132 srcvalue, (vti.Mask true_mask), VLOpFrag), 2133 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 2134 (vti.Vector (IMPLICIT_DEF)), 2135 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; 2136} 2137 2138// 11.7. Vector Narrowing Integer Right Shift Instructions 2139defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">; 2140defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">; 2141 2142defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">; 2143defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">; 2144defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">; 2145defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">; 2146defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">; 2147defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">; 2148 2149defm : VPatNarrowShiftVL_WV<riscv_srl_vl, "PseudoVNSRL">; 2150defm : VPatNarrowShiftVL_WV<riscv_sra_vl, "PseudoVNSRA">; 2151 2152defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">; 2153 2154foreach vtiTowti = AllWidenableIntVectors in { 2155 defvar vti = vtiTowti.Vti; 2156 defvar wti = vtiTowti.Wti; 2157 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2158 GetVTypePredicates<wti>.Predicates) in 2159 def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1), 2160 (vti.Mask V0), 2161 VLOpFrag)), 2162 (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK") 2163 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, 2164 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2165} 2166 2167// 11.8. Vector Integer Comparison Instructions 2168foreach vti = AllIntegerVectors in { 2169 let Predicates = GetVTypePredicates<vti>.Predicates in { 2170 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>; 2171 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>; 2172 2173 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 2174 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 2175 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2176 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2177 2178 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 2179 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 2180 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 2181 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 2182 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2183 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2184 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 2185 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 2186 // There is no VMSGE(U)_VX instruction 2187 2188 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 2189 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 2190 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2191 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2192 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 2193 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 2194 2195 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT, 2196 SplatPat_simm5_plus1>; 2197 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT, 2198 SplatPat_simm5_plus1_nonzero>; 2199 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE, 2200 SplatPat_simm5_plus1>; 2201 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE, 2202 SplatPat_simm5_plus1_nonzero>; 2203 } 2204} // foreach vti = AllIntegerVectors 2205 2206// 11.9. Vector Integer Min/Max Instructions 2207defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">; 2208defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">; 2209defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">; 2210defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">; 2211 2212// 11.10. Vector Single-Width Integer Multiply Instructions 2213defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">; 2214defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", IntegerVectorsExceptI64>; 2215defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", IntegerVectorsExceptI64>; 2216// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*. 2217let Predicates = [HasVInstructionsFullMultiply] in { 2218 defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", I64IntegerVectors>; 2219 defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", I64IntegerVectors>; 2220} 2221 2222// 11.11. Vector Integer Divide Instructions 2223defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU", isSEWAware=1>; 2224defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV", isSEWAware=1>; 2225defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>; 2226defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>; 2227 2228// 11.12. Vector Widening Integer Multiply Instructions 2229defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">; 2230defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">; 2231defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">; 2232 2233// 11.13 Vector Single-Width Integer Multiply-Add Instructions 2234defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">; 2235defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">; 2236defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">; 2237defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">; 2238 2239// 11.14. Vector Widening Integer Multiply-Add Instructions 2240defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmacc_vl, "PseudoVWMACC">; 2241defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccu_vl, "PseudoVWMACCU">; 2242defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccsu_vl, "PseudoVWMACCSU">; 2243foreach vtiTowti = AllWidenableIntVectors in { 2244 defvar vti = vtiTowti.Vti; 2245 defvar wti = vtiTowti.Wti; 2246 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2247 GetVTypePredicates<wti>.Predicates) in 2248 def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1), 2249 (SplatPat XLenVT:$rs2), 2250 (wti.Vector wti.RegClass:$rd), 2251 (vti.Mask V0), VLOpFrag), 2252 (!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK") 2253 wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1, 2254 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2255} 2256 2257// 11.15. Vector Integer Merge Instructions 2258foreach vti = AllIntegerVectors in { 2259 let Predicates = GetVTypePredicates<vti>.Predicates in { 2260 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0), 2261 vti.RegClass:$rs1, 2262 vti.RegClass:$rs2, 2263 VLOpFrag)), 2264 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX) 2265 (vti.Vector (IMPLICIT_DEF)), 2266 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0), 2267 GPR:$vl, vti.Log2SEW)>; 2268 2269 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0), 2270 (SplatPat XLenVT:$rs1), 2271 vti.RegClass:$rs2, 2272 VLOpFrag)), 2273 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX) 2274 (vti.Vector (IMPLICIT_DEF)), 2275 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2276 2277 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0), 2278 (SplatPat_simm5 simm5:$rs1), 2279 vti.RegClass:$rs2, 2280 VLOpFrag)), 2281 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX) 2282 (vti.Vector (IMPLICIT_DEF)), 2283 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2284 2285 def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0), 2286 vti.RegClass:$rs1, 2287 vti.RegClass:$rs2, 2288 VLOpFrag)), 2289 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX) 2290 vti.RegClass:$rs2, vti.RegClass:$rs2, vti.RegClass:$rs1, 2291 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2292 2293 def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0), 2294 (SplatPat XLenVT:$rs1), 2295 vti.RegClass:$rs2, 2296 VLOpFrag)), 2297 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX) 2298 vti.RegClass:$rs2, vti.RegClass:$rs2, GPR:$rs1, 2299 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2300 2301 def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0), 2302 (SplatPat_simm5 simm5:$rs1), 2303 vti.RegClass:$rs2, 2304 VLOpFrag)), 2305 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX) 2306 vti.RegClass:$rs2, vti.RegClass:$rs2, simm5:$rs1, 2307 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2308 } 2309} 2310 2311// 11.16. Vector Integer Move Instructions 2312foreach vti = AllVectors in { 2313 let Predicates = GetVTypePredicates<vti>.Predicates in { 2314 def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru, 2315 vti.RegClass:$rs2, VLOpFrag)), 2316 (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX) 2317 vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2318} 2319 2320foreach vti = AllIntegerVectors in { 2321 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)), 2322 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX) 2323 vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2324 defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5"); 2325 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5), 2326 VLOpFrag)), 2327 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX) 2328 vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>; 2329 } 2330} 2331 2332// 12. Vector Fixed-Point Arithmetic Instructions 2333 2334// 12.1. Vector Single-Width Saturating Add and Subtract 2335defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">; 2336defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">; 2337defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">; 2338defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">; 2339 2340// 13. Vector Floating-Point Instructions 2341 2342// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 2343defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD">; 2344defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fsub_vl, "PseudoVFSUB">; 2345defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fsub_vl, "PseudoVFRSUB">; 2346 2347// 13.3. Vector Widening Floating-Point Add/Subtract Instructions 2348defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwadd_vl, riscv_vfwadd_w_vl, "PseudoVFWADD">; 2349defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwsub_vl, riscv_vfwsub_w_vl, "PseudoVFWSUB">; 2350 2351// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 2352defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fmul_vl, "PseudoVFMUL">; 2353defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fdiv_vl, "PseudoVFDIV", isSEWAware=1>; 2354defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fdiv_vl, "PseudoVFRDIV", isSEWAware=1>; 2355 2356// 13.5. Vector Widening Floating-Point Multiply Instructions 2357defm : VPatBinaryFPWVL_VV_VF_RM<riscv_vfwmul_vl, "PseudoVFWMUL">; 2358 2359// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions. 2360defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmadd_vl, "PseudoVFMADD">; 2361defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmsub_vl, "PseudoVFMSUB">; 2362defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmadd_vl, "PseudoVFNMADD">; 2363defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmsub_vl, "PseudoVFNMSUB">; 2364defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">; 2365defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">; 2366defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">; 2367defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">; 2368 2369// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 2370defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACC">; 2371defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmadd_vl, "PseudoVFWNMACC">; 2372defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmsub_vl, "PseudoVFWMSAC">; 2373defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmsub_vl, "PseudoVFWNMSAC">; 2374 2375// 13.11. Vector Floating-Point MIN/MAX Instructions 2376defm : VPatBinaryFPVL_VV_VF<riscv_fminnum_vl, "PseudoVFMIN">; 2377defm : VPatBinaryFPVL_VV_VF<riscv_fmaxnum_vl, "PseudoVFMAX">; 2378 2379// 13.13. Vector Floating-Point Compare Instructions 2380defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETEQ, 2381 "PseudoVMFEQ", "PseudoVMFEQ">; 2382defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETOEQ, 2383 "PseudoVMFEQ", "PseudoVMFEQ">; 2384defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETNE, 2385 "PseudoVMFNE", "PseudoVMFNE">; 2386defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETUNE, 2387 "PseudoVMFNE", "PseudoVMFNE">; 2388defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLT, 2389 "PseudoVMFLT", "PseudoVMFGT">; 2390defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLT, 2391 "PseudoVMFLT", "PseudoVMFGT">; 2392defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLE, 2393 "PseudoVMFLE", "PseudoVMFGE">; 2394defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE, 2395 "PseudoVMFLE", "PseudoVMFGE">; 2396 2397foreach vti = AllFloatVectors in { 2398 let Predicates = GetVTypePredicates<vti>.Predicates in { 2399 // 13.8. Vector Floating-Point Square-Root Instruction 2400 def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0), 2401 VLOpFrag), 2402 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK") 2403 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 2404 (vti.Mask V0), 2405 // Value to indicate no rounding mode change in 2406 // RISCVInsertReadWriteCSR 2407 FRM_DYN, 2408 GPR:$vl, vti.Log2SEW, TA_MA)>; 2409 2410 // 13.12. Vector Floating-Point Sign-Injection Instructions 2411 def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), 2412 VLOpFrag), 2413 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_MASK") 2414 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, 2415 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2416 TA_MA)>; 2417 // Handle fneg with VFSGNJN using the same input for both operands. 2418 def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), 2419 VLOpFrag), 2420 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK") 2421 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, 2422 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2423 TA_MA)>; 2424 2425 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 2426 (vti.Vector vti.RegClass:$rs2), 2427 vti.RegClass:$merge, 2428 (vti.Mask V0), 2429 VLOpFrag), 2430 (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_MASK") 2431 vti.RegClass:$merge, vti.RegClass:$rs1, 2432 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2433 TAIL_AGNOSTIC)>; 2434 2435 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 2436 (riscv_fneg_vl vti.RegClass:$rs2, 2437 (vti.Mask true_mask), 2438 VLOpFrag), 2439 srcvalue, 2440 (vti.Mask true_mask), 2441 VLOpFrag), 2442 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX) 2443 (vti.Vector (IMPLICIT_DEF)), 2444 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2445 2446 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 2447 (SplatFPOp vti.ScalarRegClass:$rs2), 2448 vti.RegClass:$merge, 2449 (vti.Mask V0), 2450 VLOpFrag), 2451 (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_MASK") 2452 vti.RegClass:$merge, vti.RegClass:$rs1, 2453 vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2454 TAIL_AGNOSTIC)>; 2455 2456 // Rounding without exception to implement nearbyint. 2457 def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1), 2458 (vti.Mask V0), VLOpFrag), 2459 (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK") 2460 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, 2461 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2462 2463 // 14.14. Vector Floating-Point Classify Instruction 2464 def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2), 2465 (vti.Mask true_mask), VLOpFrag), 2466 (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX) 2467 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2468 } 2469} 2470 2471foreach fvti = AllFloatVectors in { 2472 // Floating-point vselects: 2473 // 11.15. Vector Integer Merge Instructions 2474 // 13.15. Vector Floating-Point Merge Instruction 2475 let Predicates = GetVTypePredicates<fvti>.Predicates in { 2476 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0), 2477 fvti.RegClass:$rs1, 2478 fvti.RegClass:$rs2, 2479 VLOpFrag)), 2480 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 2481 (fvti.Vector (IMPLICIT_DEF)), 2482 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0), 2483 GPR:$vl, fvti.Log2SEW)>; 2484 2485 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0), 2486 (SplatFPOp fvti.ScalarRegClass:$rs1), 2487 fvti.RegClass:$rs2, 2488 VLOpFrag)), 2489 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 2490 (fvti.Vector (IMPLICIT_DEF)), 2491 fvti.RegClass:$rs2, 2492 (fvti.Scalar fvti.ScalarRegClass:$rs1), 2493 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 2494 2495 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0), 2496 (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))), 2497 fvti.RegClass:$rs2, 2498 VLOpFrag)), 2499 (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX) 2500 (fvti.Vector (IMPLICIT_DEF)), 2501 fvti.RegClass:$rs2, 2502 GPR:$imm, 2503 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 2504 2505 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0), 2506 (SplatFPOp (fvti.Scalar fpimm0)), 2507 fvti.RegClass:$rs2, 2508 VLOpFrag)), 2509 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 2510 (fvti.Vector (IMPLICIT_DEF)), 2511 fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 2512 2513 def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0), 2514 fvti.RegClass:$rs1, 2515 fvti.RegClass:$rs2, 2516 VLOpFrag)), 2517 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 2518 fvti.RegClass:$rs2, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0), 2519 GPR:$vl, fvti.Log2SEW)>; 2520 2521 def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0), 2522 (SplatFPOp fvti.ScalarRegClass:$rs1), 2523 fvti.RegClass:$rs2, 2524 VLOpFrag)), 2525 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 2526 fvti.RegClass:$rs2, fvti.RegClass:$rs2, 2527 (fvti.Scalar fvti.ScalarRegClass:$rs1), 2528 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 2529 2530 def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0), 2531 (SplatFPOp (fvti.Scalar fpimm0)), 2532 fvti.RegClass:$rs2, 2533 VLOpFrag)), 2534 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 2535 fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0), 2536 GPR:$vl, fvti.Log2SEW)>; 2537 2538 // 13.16. Vector Floating-Point Move Instruction 2539 // If we're splatting fpimm0, use vmv.v.x vd, x0. 2540 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2541 fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)), 2542 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX) 2543 $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>; 2544 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2545 fvti.Vector:$passthru, (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)), 2546 (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX) 2547 $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>; 2548 2549 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2550 fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), 2551 (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" # 2552 fvti.LMul.MX) 2553 $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), 2554 GPR:$vl, fvti.Log2SEW, TU_MU)>; 2555 } 2556} 2557 2558// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions 2559defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">; 2560defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">; 2561defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">; 2562defm : VPatConvertFP2I_RM_VL_V<any_riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">; 2563 2564defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">; 2565defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">; 2566 2567defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">; 2568defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">; 2569 2570defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">; 2571defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">; 2572 2573// 13.18. Widening Floating-Point/Integer Type-Convert Instructions 2574defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">; 2575defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">; 2576defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">; 2577defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">; 2578 2579defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">; 2580defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">; 2581 2582defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">; 2583defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">; 2584 2585foreach fvtiToFWti = AllWidenableFloatVectors in { 2586 defvar fvti = fvtiToFWti.Vti; 2587 defvar fwti = fvtiToFWti.Wti; 2588 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 2589 GetVTypePredicates<fwti>.Predicates) in 2590 def : Pat<(fwti.Vector (any_riscv_fpextend_vl 2591 (fvti.Vector fvti.RegClass:$rs1), 2592 (fvti.Mask V0), 2593 VLOpFrag)), 2594 (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK") 2595 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 2596 (fvti.Mask V0), 2597 GPR:$vl, fvti.Log2SEW, TA_MA)>; 2598} 2599 2600// 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions 2601defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">; 2602defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">; 2603defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">; 2604defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">; 2605 2606defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">; 2607defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">; 2608 2609defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">; 2610defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">; 2611 2612defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">; 2613defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">; 2614 2615foreach fvtiToFWti = AllWidenableFloatVectors in { 2616 defvar fvti = fvtiToFWti.Vti; 2617 defvar fwti = fvtiToFWti.Wti; 2618 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 2619 GetVTypePredicates<fwti>.Predicates) in { 2620 def : Pat<(fvti.Vector (any_riscv_fpround_vl 2621 (fwti.Vector fwti.RegClass:$rs1), 2622 (fwti.Mask V0), VLOpFrag)), 2623 (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK") 2624 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 2625 (fwti.Mask V0), 2626 // Value to indicate no rounding mode change in 2627 // RISCVInsertReadWriteCSR 2628 FRM_DYN, 2629 GPR:$vl, fvti.Log2SEW, TA_MA)>; 2630 2631 def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl 2632 (fwti.Vector fwti.RegClass:$rs1), 2633 (fwti.Mask V0), VLOpFrag)), 2634 (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK") 2635 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 2636 (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 2637 } 2638} 2639 2640// 14. Vector Reduction Operations 2641 2642// 14.1. Vector Single-Width Integer Reduction Instructions 2643defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", is_float=0>; 2644defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", is_float=0>; 2645defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", is_float=0>; 2646defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", is_float=0>; 2647defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", is_float=0>; 2648defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", is_float=0>; 2649defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", is_float=0>; 2650defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", is_float=0>; 2651 2652// 14.2. Vector Widening Integer Reduction Instructions 2653defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", is_float=0>; 2654defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", is_float=0>; 2655defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", is_float=0>; 2656defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", is_float=0>; 2657defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", is_float=0>; 2658 2659// 14.3. Vector Single-Width Floating-Point Reduction Instructions 2660defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", is_float=1>; 2661defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", is_float=1>; 2662defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", is_float=1>; 2663defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", is_float=1>; 2664 2665// 14.4. Vector Widening Floating-Point Reduction Instructions 2666defm : VPatWidenReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse, 2667 "PseudoVFWREDOSUM", is_float=1>; 2668defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl, 2669 riscv_fpextend_vl_oneuse, 2670 "PseudoVFWREDOSUM", is_float=1>; 2671defm : VPatWidenReductionVL_RM<rvv_vecreduce_FADD_vl, fpext_oneuse, 2672 "PseudoVFWREDUSUM", is_float=1>; 2673defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl, 2674 riscv_fpextend_vl_oneuse, 2675 "PseudoVFWREDUSUM", is_float=1>; 2676 2677// 15. Vector Mask Instructions 2678 2679foreach mti = AllMasks in { 2680 let Predicates = [HasVInstructions] in { 2681 // 15.1 Vector Mask-Register Logical Instructions 2682 def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)), 2683 (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; 2684 def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)), 2685 (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; 2686 2687 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)), 2688 (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX) 2689 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2690 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)), 2691 (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX) 2692 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2693 def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)), 2694 (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX) 2695 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2696 2697 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, 2698 (riscv_vmnot_vl VR:$rs2, VLOpFrag), 2699 VLOpFrag)), 2700 (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX) 2701 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2702 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, 2703 (riscv_vmnot_vl VR:$rs2, VLOpFrag), 2704 VLOpFrag)), 2705 (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX) 2706 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2707 // XOR is associative so we need 2 patterns for VMXNOR. 2708 def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1, 2709 VLOpFrag), 2710 VR:$rs2, VLOpFrag)), 2711 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX) 2712 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2713 2714 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2, 2715 VLOpFrag), 2716 VLOpFrag)), 2717 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX) 2718 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2719 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2, 2720 VLOpFrag), 2721 VLOpFrag)), 2722 (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX) 2723 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2724 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2, 2725 VLOpFrag), 2726 VLOpFrag)), 2727 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX) 2728 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2729 2730 // Match the not idiom to the vmnot.m pseudo. 2731 def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)), 2732 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX) 2733 VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>; 2734 2735 // 15.2 Vector count population in mask vcpop.m 2736 def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask), 2737 VLOpFrag)), 2738 (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX) 2739 VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2740 def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0), 2741 VLOpFrag)), 2742 (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK") 2743 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; 2744 2745 // 15.3 vfirst find-first-set mask bit 2746 def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask), 2747 VLOpFrag)), 2748 (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX) 2749 VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2750 def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0), 2751 VLOpFrag)), 2752 (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK") 2753 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; 2754 } 2755} 2756 2757// 16. Vector Permutation Instructions 2758 2759// 16.1. Integer Scalar Move Instructions 2760// 16.4. Vector Register Gather Instruction 2761foreach vti = AllIntegerVectors in { 2762 let Predicates = GetVTypePredicates<vti>.Predicates in { 2763 def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge), 2764 vti.ScalarRegClass:$rs1, 2765 VLOpFrag)), 2766 (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX) 2767 vti.RegClass:$merge, 2768 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; 2769 2770 def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2, 2771 vti.RegClass:$rs1, 2772 vti.RegClass:$merge, 2773 (vti.Mask V0), 2774 VLOpFrag)), 2775 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") 2776 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 2777 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2778 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, 2779 vti.RegClass:$merge, 2780 (vti.Mask V0), 2781 VLOpFrag)), 2782 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") 2783 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 2784 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2785 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, 2786 uimm5:$imm, 2787 vti.RegClass:$merge, 2788 (vti.Mask V0), 2789 VLOpFrag)), 2790 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") 2791 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, 2792 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2793 } 2794 2795 // emul = lmul * 16 / sew 2796 defvar vlmul = vti.LMul; 2797 defvar octuple_lmul = vlmul.octuple; 2798 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); 2799 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 2800 defvar emul_str = octuple_to_str<octuple_emul>.ret; 2801 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str); 2802 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str; 2803 let Predicates = GetVTypePredicates<vti>.Predicates in 2804 def : Pat<(vti.Vector 2805 (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, 2806 (ivti.Vector ivti.RegClass:$rs1), 2807 vti.RegClass:$merge, 2808 (vti.Mask V0), 2809 VLOpFrag)), 2810 (!cast<Instruction>(inst#"_MASK") 2811 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, 2812 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2813 } 2814} 2815 2816// 16.2. Floating-Point Scalar Move Instructions 2817foreach vti = AllFloatVectors in { 2818 let Predicates = GetVTypePredicates<vti>.Predicates in { 2819 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 2820 (vti.Scalar (fpimm0)), 2821 VLOpFrag)), 2822 (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX) 2823 vti.RegClass:$merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>; 2824 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 2825 (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))), 2826 VLOpFrag)), 2827 (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX) 2828 vti.RegClass:$merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>; 2829 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 2830 vti.ScalarRegClass:$rs1, 2831 VLOpFrag)), 2832 (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX) 2833 vti.RegClass:$merge, 2834 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; 2835 } 2836 defvar ivti = GetIntVTypeInfo<vti>.Vti; 2837 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2838 GetVTypePredicates<ivti>.Predicates) in { 2839 def : Pat<(vti.Vector 2840 (riscv_vrgather_vv_vl vti.RegClass:$rs2, 2841 (ivti.Vector vti.RegClass:$rs1), 2842 vti.RegClass:$merge, 2843 (vti.Mask V0), 2844 VLOpFrag)), 2845 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") 2846 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 2847 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2848 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, 2849 vti.RegClass:$merge, 2850 (vti.Mask V0), 2851 VLOpFrag)), 2852 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") 2853 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 2854 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2855 def : Pat<(vti.Vector 2856 (riscv_vrgather_vx_vl vti.RegClass:$rs2, 2857 uimm5:$imm, 2858 vti.RegClass:$merge, 2859 (vti.Mask V0), 2860 VLOpFrag)), 2861 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") 2862 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, 2863 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2864 } 2865 2866 defvar vlmul = vti.LMul; 2867 defvar octuple_lmul = vlmul.octuple; 2868 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); 2869 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 2870 defvar emul_str = octuple_to_str<octuple_emul>.ret; 2871 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str); 2872 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str; 2873 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2874 GetVTypePredicates<ivti>.Predicates) in 2875 def : Pat<(vti.Vector 2876 (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, 2877 (ivti.Vector ivti.RegClass:$rs1), 2878 vti.RegClass:$merge, 2879 (vti.Mask V0), 2880 VLOpFrag)), 2881 (!cast<Instruction>(inst#"_MASK") 2882 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, 2883 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2884 } 2885} 2886 2887//===----------------------------------------------------------------------===// 2888// Miscellaneous RISCVISD SDNodes 2889//===----------------------------------------------------------------------===// 2890 2891def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2, 2892 [SDTCisVec<0>, SDTCVecEltisVT<1, i1>, 2893 SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>; 2894 2895def SDTRVVSlide : SDTypeProfile<1, 6, [ 2896 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>, 2897 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>, 2898 SDTCisVT<6, XLenVT> 2899]>; 2900def SDTRVVSlide1 : SDTypeProfile<1, 5, [ 2901 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>, 2902 SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, 2903 SDTCisVT<5, XLenVT> 2904]>; 2905def SDTRVVFSlide1 : SDTypeProfile<1, 5, [ 2906 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>, 2907 SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, 2908 SDTCisVT<5, XLenVT> 2909]>; 2910 2911def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>; 2912def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>; 2913def riscv_slidedown_vl : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>; 2914def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>; 2915def riscv_fslide1up_vl : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>; 2916def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>; 2917 2918foreach vti = AllIntegerVectors in { 2919 let Predicates = GetVTypePredicates<vti>.Predicates in { 2920 def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0), 2921 VLOpFrag)), 2922 (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK") 2923 (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2924 TAIL_AGNOSTIC)>; 2925 def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd), 2926 (vti.Vector vti.RegClass:$rs1), 2927 GPR:$rs2, (vti.Mask true_mask), 2928 VLOpFrag)), 2929 (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) 2930 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2931 def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd), 2932 (vti.Vector vti.RegClass:$rs1), 2933 GPR:$rs2, (vti.Mask true_mask), 2934 VLOpFrag)), 2935 (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) 2936 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2937 } 2938} 2939 2940foreach vti = AllFloatVectors in { 2941 let Predicates = GetVTypePredicates<vti>.Predicates in { 2942 def : Pat<(vti.Vector (riscv_fslide1up_vl (vti.Vector vti.RegClass:$rd), 2943 (vti.Vector vti.RegClass:$rs1), 2944 vti.Scalar:$rs2, (vti.Mask true_mask), 2945 VLOpFrag)), 2946 (!cast<Instruction>("PseudoVFSLIDE1UP_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 2947 vti.RegClass:$rd, vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2948 def : Pat<(vti.Vector (riscv_fslide1down_vl (vti.Vector vti.RegClass:$rd), 2949 (vti.Vector vti.RegClass:$rs1), 2950 vti.Scalar:$rs2, (vti.Mask true_mask), 2951 VLOpFrag)), 2952 (!cast<Instruction>("PseudoVFSLIDE1DOWN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 2953 vti.RegClass:$rd, vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2954 } 2955} 2956 2957foreach vti = AllVectors in { 2958 let Predicates = GetVTypePredicates<vti>.Predicates in { 2959 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), 2960 (vti.Vector vti.RegClass:$rs1), 2961 uimm5:$rs2, (vti.Mask true_mask), 2962 VLOpFrag, (XLenVT timm:$policy))), 2963 (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX) 2964 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, 2965 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2966 2967 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), 2968 (vti.Vector vti.RegClass:$rs1), 2969 GPR:$rs2, (vti.Mask true_mask), 2970 VLOpFrag, (XLenVT timm:$policy))), 2971 (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX) 2972 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 2973 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2974 2975 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), 2976 (vti.Vector vti.RegClass:$rs1), 2977 uimm5:$rs2, (vti.Mask true_mask), 2978 VLOpFrag, (XLenVT timm:$policy))), 2979 (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX) 2980 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, 2981 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2982 2983 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), 2984 (vti.Vector vti.RegClass:$rs1), 2985 GPR:$rs2, (vti.Mask true_mask), 2986 VLOpFrag, (XLenVT timm:$policy))), 2987 (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX) 2988 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 2989 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 2990 } 2991} 2992