//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and VL patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the VL patterns.
//===----------------------------------------------------------------------===//

// Common operand layout of the *_VL type profiles below: the data operand(s)
// come first, then (for most nodes) a merge/passthru operand of the result
// type, then an i1 mask vector with the same element count as the result, and
// finally an XLenVT VL (vector length) operand.
def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisVec<0>, SDTCisInt<0>,
                                               SDTCVecEltisVT<3, i1>,
                                               SDTCisSameNumEltsAs<0, 3>,
                                               SDTCisVT<4, XLenVT>]>;

def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVec<0>, SDTCisInt<0>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCVecEltisVT<4, i1>,
                                                SDTCisSameNumEltsAs<0, 4>,
                                                SDTCisVT<5, XLenVT>]>;

def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<2, i1>,
                                              SDTCisSameNumEltsAs<0, 2>,
                                              SDTCisVT<3, XLenVT>]>;
def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisVec<0>, SDTCisFP<0>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCVecEltisVT<4, i1>,
                                               SDTCisSameNumEltsAs<0, 4>,
                                               SDTCisVT<5, XLenVT>]>;

def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVec<0>,
                                                SDTCisFP<0>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCVecEltisVT<4, i1>,
                                                SDTCisSameNumEltsAs<0, 4>,
                                                SDTCisVT<5, XLenVT>]>;

// Move/splat nodes. Each carries a passthru (operand 1), a scalar source and
// a VL operand; the scalar is XLenVT for integer moves and a vector element
// type for the FP moves.
def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL",
                              SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisSameAs<0, 2>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL",
                              SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisVT<2, XLenVT>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL",
                               SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
                                                    SDTCisSameAs<0, 1>,
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;
def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL",
                              SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                   SDTCisInt<0>,
                                                   SDTCisVT<2, XLenVT>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL",
                               SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                    SDTCisFP<0>,
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;

// Integer binary arithmetic/logic nodes with merge, mask and VL operands.
def riscv_add_vl   : SDNode<"RISCVISD::ADD_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sub_vl   : SDNode<"RISCVISD::SUB_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_mul_vl   : SDNode<"RISCVISD::MUL_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_and_vl   : SDNode<"RISCVISD::AND_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_or_vl    : SDNode<"RISCVISD::OR_VL",    SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_xor_vl   : SDNode<"RISCVISD::XOR_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sdiv_vl  : SDNode<"RISCVISD::SDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_srem_vl  : SDNode<"RISCVISD::SREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_udiv_vl  : SDNode<"RISCVISD::UDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_urem_vl  : SDNode<"RISCVISD::UREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_shl_vl   : SDNode<"RISCVISD::SHL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_sra_vl   : SDNode<"RISCVISD::SRA_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_srl_vl   : SDNode<"RISCVISD::SRL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_rotl_vl  : SDNode<"RISCVISD::ROTL_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_rotr_vl  : SDNode<"RISCVISD::ROTR_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_smin_vl  : SDNode<"RISCVISD::SMIN_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_smax_vl  : SDNode<"RISCVISD::SMAX_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umin_vl  : SDNode<"RISCVISD::UMIN_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umax_vl  : SDNode<"RISCVISD::UMAX_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;

// Integer unary bit-manipulation nodes (merge, mask and VL operands).
def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>;
def riscv_bswap_vl      : SDNode<"RISCVISD::BSWAP_VL",      SDT_RISCVIntUnOp_VL>;
def riscv_ctlz_vl       : SDNode<"RISCVISD::CTLZ_VL",       SDT_RISCVIntUnOp_VL>;
def riscv_cttz_vl       : SDNode<"RISCVISD::CTTZ_VL",       SDT_RISCVIntUnOp_VL>;
def riscv_ctpop_vl      : SDNode<"RISCVISD::CTPOP_VL",      SDT_RISCVIntUnOp_VL>;

// Averaging and saturating integer arithmetic.
def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgceilu_vl  : SDNode<"RISCVISD::AVGCEILU_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_saddsat_vl   : SDNode<"RISCVISD::SADDSAT_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_uaddsat_vl   : SDNode<"RISCVISD::UADDSAT_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_ssubsat_vl   : SDNode<"RISCVISD::SSUBSAT_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_usubsat_vl   : SDNode<"RISCVISD::USUBSAT_VL",   SDT_RISCVIntBinOp_VL>;

// Floating-point arithmetic nodes.
def riscv_fadd_vl      : SDNode<"RISCVISD::FADD_VL",      SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fsub_vl      : SDNode<"RISCVISD::FSUB_VL",      SDT_RISCVFPBinOp_VL>;
def riscv_fmul_vl      : SDNode<"RISCVISD::FMUL_VL",      SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fdiv_vl      : SDNode<"RISCVISD::FDIV_VL",      SDT_RISCVFPBinOp_VL>;
def riscv_fneg_vl      : SDNode<"RISCVISD::FNEG_VL",      SDT_RISCVFPUnOp_VL>;
def riscv_fabs_vl      : SDNode<"RISCVISD::FABS_VL",      SDT_RISCVFPUnOp_VL>;
def riscv_fsqrt_vl     : SDNode<"RISCVISD::FSQRT_VL",     SDT_RISCVFPUnOp_VL>;
def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>;
def riscv_vfmin_vl     : SDNode<"RISCVISD::VFMIN_VL",     SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfmax_vl     : SDNode<"RISCVISD::VFMAX_VL",     SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;

// Strict (constrained) FP variants. They carry a chain (SDNPHasChain) so
// exception-raising side effects are ordered.
def riscv_strict_fadd_vl  : SDNode<"RISCVISD::STRICT_FADD_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_fsub_vl  : SDNode<"RISCVISD::STRICT_FSUB_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fmul_vl  : SDNode<"RISCVISD::STRICT_FMUL_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_fdiv_vl  : SDNode<"RISCVISD::STRICT_FDIV_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;

// "any_*" PatFrags match both the normal and the strict form of an FP node so
// one pattern can cover both.
def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                  [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
                                   (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;

// FP classify: integer result vector with the same size/element count as the
// FP source vector.
def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL",
                             SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
                                                  SDTCisFP<1>, SDTCisVec<1>,
                                                  SDTCisSameSizeAs<0, 1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCVecEltisVT<2, i1>,
                                                  SDTCisSameNumEltsAs<0, 2>,
                                                  SDTCisVT<3, XLenVT>]>>;

// FP fused multiply-add: three vector sources of the result type, plus mask
// and VL.
def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                              SDTCisSameAs<0, 2>,
                                              SDTCisSameAs<0, 3>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<4, i1>,
                                              SDTCisSameNumEltsAs<0, 4>,
                                              SDTCisVT<5, XLenVT>]>;
def riscv_vfmadd_vl  : SDNode<"RISCVISD::VFMADD_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfmsub_vl  : SDNode<"RISCVISD::VFMSUB_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;

// Widening FP FMA: the two multiplicands (operands 1 and 2) are the narrower
// type; the addend (operand 3) and result are the wider type.
def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                               SDTCisVec<1>, SDTCisFP<1>,
                                               SDTCisOpSmallerThanOp<1, 0>,
                                               SDTCisSameNumEltsAs<0, 1>,
                                               SDTCisSameAs<1, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCVecEltisVT<4, i1>,
                                               SDTCisSameNumEltsAs<0, 4>,
                                               SDTCisVT<5, XLenVT>]>;
def riscv_vfwmadd_vl  : SDNode<"RISCVISD::VFWMADD_VL",  SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwmsub_vl  : SDNode<"RISCVISD::VFWMSUB_VL",  SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;

def riscv_strict_vfmadd_vl  : SDNode<"RISCVISD::STRICT_VFMADD_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfmsub_vl  : SDNode<"RISCVISD::STRICT_VFMSUB_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;

def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                   [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                     (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                   [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                     (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;

// FP rounding (result narrower than source) and extension (result wider).
def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

def riscv_fpround_vl          : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_strict_fpround_vl   : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;
def riscv_fpextend_vl         : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
def riscv_strict_fpextend_vl  : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>;
def riscv_fncvt_rod_vl        : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;

def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                    [(riscv_fpround_vl node:$src, node:$mask, node:$vl),
                                     (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fpextend_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                     [(riscv_fpextend_vl node:$src, node:$mask, node:$vl),
                                      (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fncvt_rod_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                      [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl),
                                       (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>;

// FP<->integer conversions. The "_RM_" profiles take an extra explicit
// rounding-mode operand.
def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

// Vector compare: i1 result vector; operands are the two source vectors, a
// condition code (OtherVT), a merge operand, a mask and VL.
def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [
  SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>,
  SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>;

// Float -> Int
def riscv_vfcvt_xu_f_vl    : SDNode<"RISCVISD::VFCVT_XU_F_VL",    SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_x_f_vl     : SDNode<"RISCVISD::VFCVT_X_F_VL",     SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>;
def riscv_vfcvt_rm_x_f_vl  : SDNode<"RISCVISD::VFCVT_RM_X_F_VL",  SDT_RISCVFP2IOp_RM_VL>;

def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_rtz_x_f_vl  : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL",  SDT_RISCVFP2IOp_VL>;

def riscv_strict_vfcvt_rm_x_f_vl   : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL",   SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_x_f_vl  : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL",  SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;

def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm),
                                         [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm),
                                          (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>;
def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                           [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl),
                                            (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                          [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl),
                                           (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>;

// Int -> Float
def riscv_sint_to_fp_vl    : SDNode<"RISCVISD::SINT_TO_FP_VL",    SDT_RISCVI2FPOp_VL>;
def riscv_uint_to_fp_vl    : SDNode<"RISCVISD::UINT_TO_FP_VL",    SDT_RISCVI2FPOp_VL>;
def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>;
def riscv_vfcvt_rm_f_x_vl  : SDNode<"RISCVISD::VFCVT_RM_F_X_VL",  SDT_RISCVI2FPOp_RM_VL>;

def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;

def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                       [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl),
                                        (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                       [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl),
                                        (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>;

def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>;
def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;

def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                             [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl),
                                              (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>;

def riscv_setcc_vl          : SDNode<"RISCVISD::SETCC_VL",         SDT_RISCVSETCCOP_VL>;
def riscv_strict_fsetcc_vl  : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
// Quiet (fsetcc) and signaling (fsetccs) compare frags; both also accept the
// non-strict SETCC_VL form.
def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                   [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                    (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                    [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                     (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;

// Register gather: scalar index (VX), vector index (VV), and i16-indexed
// (EI16) forms.
def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
                                  SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisVT<2, XLenVT>,
                                                       SDTCisSameAs<0, 3>,
                                                       SDTCVecEltisVT<4, i1>,
                                                       SDTCisSameNumEltsAs<0, 4>,
                                                       SDTCisVT<5, XLenVT>]>>;
def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL",
                                  SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisInt<2>,
                                                       SDTCisSameNumEltsAs<0, 2>,
                                                       SDTCisSameSizeAs<0, 2>,
                                                       SDTCisSameAs<0, 3>,
                                                       SDTCVecEltisVT<4, i1>,
                                                       SDTCisSameNumEltsAs<0, 4>,
                                                       SDTCisVT<5, XLenVT>]>>;
def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
                                      SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                           SDTCisSameAs<0, 1>,
                                                           SDTCisInt<2>,
                                                           SDTCVecEltisVT<2, i16>,
                                                           SDTCisSameNumEltsAs<0, 2>,
                                                           SDTCisSameAs<0, 3>,
                                                           SDTCVecEltisVT<4, i1>,
                                                           SDTCisSameNumEltsAs<0, 4>,
                                                           SDTCisVT<5, XLenVT>]>>;

// Select between two vectors under an i1 mask (operand 1), with passthru
// (operand 4) and VL.
def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
  SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_vmerge_vl : SDNode<"RISCVISD::VMERGE_VL", SDT_RISCVVMERGE_VL>;

// Mask set/clear: i1 vector result from just a VL operand.
def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
                                                SDTCisVT<1, XLenVT>]>;
def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;

def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                 SDTCisSameAs<0, 2>,
                                                 SDTCVecEltisVT<0, i1>,
                                                 SDTCisVT<3, XLenVT>]>;
def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmor_vl  : SDNode<"RISCVISD::VMOR_VL",  SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;

// An all-ones mask: a VMSET_VL with any VL.
def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;

// Mask complement expressed as XOR with the all-ones mask.
def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
                             (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;

def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
                            SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                                                 SDTCisVec<1>, SDTCisInt<1>,
                                                 SDTCVecEltisVT<2, i1>,
                                                 SDTCisSameNumEltsAs<1, 2>,
                                                 SDTCisVT<3, XLenVT>]>>;

def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL",
                             SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                                                  SDTCisVec<1>, SDTCisInt<1>,
                                                  SDTCVecEltisVT<2, i1>,
                                                  SDTCisSameNumEltsAs<1, 2>,
                                                  SDTCisVT<3, XLenVT>]>>;

def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>,
                                               SDTCisSameNumEltsAs<0, 1>,
                                               SDTCisSameNumEltsAs<1, 2>,
                                               SDTCVecEltisVT<2, i1>,
                                               SDTCisVT<3, XLenVT>]>;
def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>;
def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;

def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
                                   SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                        SDTCisSameNumEltsAs<0, 1>,
                                                        SDTCisSameNumEltsAs<0, 2>,
                                                        SDTCVecEltisVT<2, i1>,
                                                        SDTCisVT<3, XLenVT>]>>;

// Widening integer binop: both sources (operands 1 and 2) are the narrower
// type, the merge (operand 3) and result are the wider type.
def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                  SDTCisInt<1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCisOpSmallerThanOp<1, 0>,
                                                  SDTCisSameAs<1, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<1, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;
def riscv_vwmul_vl   : SDNode<"RISCVISD::VWMUL_VL",   SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwmulu_vl  : SDNode<"RISCVISD::VWMULU_VL",  SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>;
def riscv_vwadd_vl   : SDNode<"RISCVISD::VWADD_VL",   SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwaddu_vl  : SDNode<"RISCVISD::VWADDU_VL",  SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwsub_vl   : SDNode<"RISCVISD::VWSUB_VL",   SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsubu_vl  : SDNode<"RISCVISD::VWSUBU_VL",  SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsll_vl   : SDNode<"RISCVISD::VWSLL_VL",   SDT_RISCVVWIntBinOp_VL, []>;

def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisInt<1>,
                                                   SDTCisSameNumEltsAs<0, 1>,
                                                   SDTCisOpSmallerThanOp<1, 0>,
                                                   SDTCisSameAs<1, 2>,
                                                   SDTCisSameAs<0, 3>,
                                                   SDTCisSameNumEltsAs<1, 4>,
                                                   SDTCVecEltisVT<4, i1>,
                                                   SDTCisVT<5, XLenVT>]>;
def riscv_vwmacc_vl   : SDNode<"RISCVISD::VWMACC_VL",   SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
def riscv_vwmaccu_vl  : SDNode<"RISCVISD::VWMACCU_VL",  SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>;

def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                                 SDTCisFP<1>,
                                                 SDTCisSameNumEltsAs<0, 1>,
                                                 SDTCisOpSmallerThanOp<1, 0>,
                                                 SDTCisSameAs<1, 2>,
                                                 SDTCisSameAs<0, 3>,
                                                 SDTCisSameNumEltsAs<1, 4>,
                                                 SDTCVecEltisVT<4, i1>,
                                                 SDTCisVT<5, XLenVT>]>;
def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>;

// Narrowing integer binop: result is narrower than operand 1.
def SDT_RISCVVNIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                  SDTCisInt<1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCisOpSmallerThanOp<0, 1>,
                                                  SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<0, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;
def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNIntBinOp_VL>;

// Widening ".w" form: operand 1 is already the wide type, operand 2 is the
// narrow type.
def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisInt<2>,
                                                   SDTCisSameNumEltsAs<1, 2>,
                                                   SDTCisOpSmallerThanOp<2, 1>,
                                                   SDTCisSameAs<0, 3>,
                                                   SDTCisSameNumEltsAs<1, 4>,
                                                   SDTCVecEltisVT<4, i1>,
                                                   SDTCisVT<5, XLenVT>]>;
def riscv_vwadd_w_vl  : SDNode<"RISCVISD::VWADD_W_VL",  SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwsub_w_vl  : SDNode<"RISCVISD::VWSUB_W_VL",  SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>;

def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                                  SDTCisSameAs<0, 1>,
                                                  SDTCisFP<2>,
                                                  SDTCisSameNumEltsAs<1, 2>,
                                                  SDTCisOpSmallerThanOp<2, 1>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<1, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;

def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>;
def riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>;

def SDTRVVVecReduce : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;

// "_oneuse" PatFrags only match when the node has a single use, so the DAG
// combines using them do not duplicate work.
def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                   node:$E),
                                  (riscv_add_vl node:$A, node:$B, node:$C,
                                   node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                   node:$E),
                                  (riscv_sub_vl node:$A, node:$B, node:$C,
                                   node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                   node:$E),
                                  (riscv_mul_vl node:$A, node:$B, node:$C,
                                   node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                     node:$E),
                                    (riscv_vwmul_vl node:$A, node:$B, node:$C,
                                     node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                      node:$E),
                                     (riscv_vwmulu_vl node:$A, node:$B, node:$C,
                                      node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                      (riscv_vwmulsu_vl node:$A, node:$B, node:$C,
                                       node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                   (riscv_sext_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                   (riscv_zext_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                       (riscv_fpextend_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                      node:$E),
                                     (riscv_vfmadd_vl node:$A, node:$B,
                                      node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                      (riscv_vfnmadd_vl node:$A, node:$B,
                                       node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                      node:$E),
                                     (riscv_vfmsub_vl node:$A, node:$B,
                                      node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                      (riscv_vfnmsub_vl node:$A, node:$B,
                                       node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;

// Give explicit Complexity to prefer simm5/uimm5.
def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>;
def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>;
def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>;
def SplatPat_simm5_plus1
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>;
def SplatPat_simm5_plus1_nonzero
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>;

// Selects extends or truncates of splats where we only care about the lowest 8
// bits of each element.
def Low8BitsSplatPat
    : ComplexPattern<vAny, 1, "selectLow8BitsVSplat", [], [], 2>;

// Ignore the vl operand on vmv_v_f, and vmv_s_f.
def SplatFPOp : PatFrags<(ops node:$op),
                         [(riscv_vfmv_v_f_vl undef, node:$op, srcvalue),
                          (riscv_vfmv_s_f_vl undef, node:$op, srcvalue)]>;

def sew8simm5  : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>",  []>;
def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;

// Matches a masked vector-vector binary *_VL node against the corresponding
// "_MASK" pseudo. When isSEWAware is set, the pseudo name also carries an
// "_E<sew>" component.
class VPatBinaryVL_V<SDPatternOperator vop,
                     string instruction_name,
                     string suffix,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int log2sew,
                     LMULInfo vlmul,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     VReg op2_reg_class,
                     bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (op1_type op1_reg_class:$rs1),
                        (op2_type op2_reg_class:$rs2),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
               result_reg_class:$merge,
               op1_reg_class:$rs1,
               op2_reg_class:$rs2,
               (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// As VPatBinaryVL_V, but for pseudos that take an explicit rounding-mode
// operand (FRM_DYN is passed to indicate no rounding-mode change).
class VPatBinaryVL_V_RM<SDPatternOperator vop,
                        string instruction_name,
                        string suffix,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int log2sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        VReg op1_reg_class,
                        VReg op2_reg_class,
                        bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (op1_type op1_reg_class:$rs1),
                        (op2_type op2_reg_class:$rs2),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
               result_reg_class:$merge,
               op1_reg_class:$rs1,
               op2_reg_class:$rs2,
               (mask_type V0),
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
               FRM_DYN,
               GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// Matches unmasked uses of a tied-operand ("_TIED") pseudo: the first pattern
// is the tail-agnostic case; the second matches a surrounding vmerge to select
// the tail-undisturbed (TU_MU) policy.
multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
                                    string instruction_name,
                                    string suffix,
                                    ValueType result_type,
                                    ValueType op2_type,
                                    int sew,
                                    LMULInfo vlmul,
                                    VReg result_reg_class,
                                    VReg op2_reg_class> {
  def : Pat<(result_type (vop
                          (result_type result_reg_class:$rs1),
                          (op2_type op2_reg_class:$rs2),
                          srcvalue,
                          true_mask,
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
               result_reg_class:$rs1,
               op2_reg_class:$rs2,
               GPR:$vl, sew, TAIL_AGNOSTIC)>;
  // Tail undisturbed
  def : Pat<(riscv_vmerge_vl true_mask,
             (result_type (vop
                           result_reg_class:$rs1,
                           (op2_type op2_reg_class:$rs2),
                           srcvalue,
                           true_mask,
                           VLOpFrag)),
             result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
               result_reg_class:$rs1,
               op2_reg_class:$rs2,
               GPR:$vl, sew, TU_MU)>;
}

// Rounding-mode variant of VPatTiedBinaryNoMaskVL_V (passes FRM_DYN).
multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop,
                                       string instruction_name,
                                       string suffix,
                                       ValueType result_type,
                                       ValueType op2_type,
                                       int sew,
                                       LMULInfo vlmul,
                                       VReg result_reg_class,
                                       VReg op2_reg_class> {
  def : Pat<(result_type (vop
                          (result_type result_reg_class:$rs1),
                          (op2_type op2_reg_class:$rs2),
                          srcvalue,
                          true_mask,
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
               result_reg_class:$rs1,
               op2_reg_class:$rs2,
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
               FRM_DYN,
               GPR:$vl, sew, TAIL_AGNOSTIC)>;
  // Tail undisturbed
  def : Pat<(riscv_vmerge_vl true_mask,
             (result_type (vop
                           result_reg_class:$rs1,
                           (op2_type op2_reg_class:$rs2),
                           srcvalue,
                           true_mask,
                           VLOpFrag)),
             result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
               result_reg_class:$rs1,
               op2_reg_class:$rs2,
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
               FRM_DYN,
               GPR:$vl, sew, TU_MU)>;
}

// Matches a masked vector-scalar/vector-immediate binary *_VL node (operand 2
// is a splat selected by SplatPatKind) against the "_MASK" pseudo.
class VPatBinaryVL_XI<SDPatternOperator vop,
                      string instruction_name,
                      string suffix,
                      ValueType result_type,
                      ValueType vop1_type,
                      ValueType vop2_type,
                      ValueType mask_type,
                      int log2sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg vop_reg_class,
                      ComplexPattern SplatPatKind,
                      DAGOperand xop_kind,
                      bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (vop1_type vop_reg_class:$rs1),
                        (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                 instruction_name#_#suffix#_#vlmul.MX#"_MASK"))
               result_reg_class:$merge,
               vop_reg_class:$rs1,
               xop_kind:$rs2,
               (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// Emits VV and VX patterns for each vector type in vtilist.
multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
                              list<VTypeInfo> vtilist = AllIntegerVectors,
                              bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                           vti.RegClass, isSEWAware>;
      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
                            vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                            SplatPat, GPR, isSEWAware>;
    }
  }
}

// As VPatBinaryVL_VV_VX, additionally emitting a VI (immediate-splat) pattern.
multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                 Operand ImmType = simm5>
    : VPatBinaryVL_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatBinaryVL_XI<vop, instruction_name, "VI",
                          vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                          !cast<ComplexPattern>(SplatPat#_#ImmType),
                          ImmType>;
  }
}

multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
                            wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                            SplatPat, GPR>;
    }
  }
}

// Extends VPatBinaryWVL_VV_VX with the "WV"/"WX" forms, where the first
// source is already wide (wti). The tied patterns select the "_TIED" pseudo.
multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w,
                                     string instruction_name>
    : VPatBinaryWVL_VV_VX<vop, instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV",
                                      wti.Vector, vti.Vector, vti.Log2SEW,
                                      vti.LMul, wti.RegClass, vti.RegClass>;
      def : VPatBinaryVL_V<vop_w, instruction_name, "WV",
                           wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_XI<vop_w, instruction_name, "WX",
                            wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                            SplatPat, GPR>;
    }
  }
}

// Narrowing ops: wide (wti) first source, narrow (vti) result. Covers
// vector ("WV"), scalar ("WX") and unsigned-immediate ("WI") second operands.
multiclass VPatBinaryNVL_WV_WX_WI<SDPatternOperator vop, string instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V<vop, instruction_name, "WV",
                           vti.Vector, wti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_XI<vop, instruction_name, "WX",
                            vti.Vector, wti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
                            SplatPat, GPR>;
      def : VPatBinaryVL_XI<vop, instruction_name, "WI",
                            vti.Vector, wti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
                            !cast<ComplexPattern>(SplatPat#_#uimm5),
                            uimm5>;
    }
  }
}

// Masked VL pattern for an FP binary op whose second operand is a splatted
// FP scalar register (the "VF" form).
class VPatBinaryVL_VF<SDPatternOperator vop,
                      string instruction_name,
                      ValueType result_type,
                      ValueType vop1_type,
                      ValueType vop2_type,
                      ValueType mask_type,
                      int log2sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg vop_reg_class,
                      RegisterClass scalar_reg_class,
                      bit isSEWAware = 0>
    : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
                        (vop2_type (SplatFPOp scalar_reg_class:$rs2)),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
                     !if(isSEWAware,
                         instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                         instruction_name#"_"#vlmul.MX#"_MASK"))
                 result_reg_class:$merge,
                 vop_reg_class:$rs1,
                 scalar_reg_class:$rs2,
                 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// As VPatBinaryVL_VF, but the pseudo takes a static rounding-mode operand;
// FRM_DYN keeps the dynamic rounding mode.
class VPatBinaryVL_VF_RM<SDPatternOperator vop,
                         string instruction_name,
                         ValueType result_type,
                         ValueType vop1_type,
                         ValueType vop2_type,
                         ValueType mask_type,
                         int log2sew,
                         LMULInfo vlmul,
                         VReg result_reg_class,
                         VReg vop_reg_class,
                         RegisterClass scalar_reg_class,
                         bit isSEWAware = 0>
    : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
                        (vop2_type (SplatFPOp scalar_reg_class:$rs2)),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
                     !if(isSEWAware,
                         instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                         instruction_name#"_"#vlmul.MX#"_MASK"))
                 result_reg_class:$merge,
                 vop_reg_class:$rs1,
                 scalar_reg_class:$rs2,
                 (mask_type V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// "VV" and "VF" patterns for every FP vector type.
multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name,
                                bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                           vti.RegClass, isSEWAware>;
      def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                            vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                            vti.ScalarRegClass, isSEWAware>;
    }
  }
}

// Rounding-mode variant of VPatBinaryFPVL_VV_VF.
multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name,
                                   bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinaryVL_V_RM<vop, instruction_name, "VV",
                              vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                              vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                              vti.RegClass, isSEWAware>;
      def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
                               vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                               vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                               vti.ScalarRegClass, isSEWAware>;
    }
  }
}

// Reversed-operand FP pattern: the splatted scalar is the FIRST operand of
// vop (used for non-commutative ops like reverse-subtract/divide).
multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
                               bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
                                fvti.RegClass:$rs1,
                                (fvti.Vector fvti.RegClass:$merge),
                                (fvti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(
                         !if(isSEWAware,
                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
                   fvti.RegClass:$merge,
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// Rounding-mode variant of VPatBinaryFPVL_R_VF.
multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name,
                                  bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
                                fvti.RegClass:$rs1,
                                (fvti.Vector fvti.RegClass:$merge),
                                (fvti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(
                         !if(isSEWAware,
                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
                   fvti.RegClass:$merge,
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   (fvti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// Masked vector-vector integer compare producing a mask register, for the
// given condition code.
multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
                                 CondCode cc> {
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      vti.RegClass:$rs2, cc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
                 VR:$merge,
                 vti.RegClass:$rs1,
                 vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}

// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc>
    : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> {
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
                                      vti.RegClass:$rs1, invcc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
                 VR:$merge, vti.RegClass:$rs1,
                 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}

// Vector-scalar integer compare; also matches the splat-on-the-left form
// using the inverted condition code.
multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> {
  defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK");
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (SplatPat (XLenVT GPR:$rs2)), cc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (instruction_masked VR:$merge, vti.RegClass:$rs1,
                                GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
                                      (vti.Vector vti.RegClass:$rs1), invcc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (instruction_masked VR:$merge, vti.RegClass:$rs1,
                                GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}

// Vector-immediate (simm5) integer compare, with the swapped/inverted form.
multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> {
  defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (SplatPat_simm5 simm5:$rs2), cc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (instruction_masked VR:$merge, vti.RegClass:$rs1,
                                XLenVT:$rs2, (vti.Mask V0), GPR:$vl,
                                vti.Log2SEW)>;

  // FIXME: Can do some canonicalization to remove these patterns.
  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
                                      (vti.Vector vti.RegClass:$rs1), invcc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (instruction_masked VR:$merge, vti.RegClass:$rs1,
                                simm5:$rs2, (vti.Mask V0), GPR:$vl,
                                vti.Log2SEW)>;
}

// Immediate compare where the matched splat is imm+1 and the emitted
// instruction uses DecImm (imm-1) to implement the adjusted condition.
multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti,
                                                string instruction_name,
                                                CondCode cc, CondCode invcc,
                                                ComplexPattern splatpat_kind> {
  defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (splatpat_kind simm5:$rs2), cc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (instruction_masked VR:$merge, vti.RegClass:$rs1,
                                (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
                                vti.Log2SEW)>;

  // FIXME: Can do some canonicalization to remove these patterns.
  def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
                                      (vti.Vector vti.RegClass:$rs1), invcc,
                                      VR:$merge,
                                      (vti.Mask V0),
                                      VLOpFrag)),
            (instruction_masked VR:$merge, vti.RegClass:$rs1,
                                (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
                                vti.Log2SEW)>;
}

// FP compares: vector-vector, vector-scalar, and scalar-vector; the
// scalar-on-the-left form selects the "swapped" instruction
// (e.g. vmfgt for a matched vmflt with reversed operands).
multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
                                  string inst_name,
                                  string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<fvti>.Predicates in {
      def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
                                fvti.RegClass:$rs2,
                                cc,
                                VR:$merge,
                                (fvti.Mask V0),
                                VLOpFrag)),
                (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK")
                     VR:$merge, fvti.RegClass:$rs1,
                     fvti.RegClass:$rs2, (fvti.Mask V0),
                     GPR:$vl, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
                                (SplatFPOp fvti.ScalarRegClass:$rs2),
                                cc,
                                VR:$merge,
                                (fvti.Mask V0),
                                VLOpFrag)),
                (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
                     VR:$merge, fvti.RegClass:$rs1,
                     fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
                     GPR:$vl, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
                                (fvti.Vector fvti.RegClass:$rs1),
                                cc,
                                VR:$merge,
                                (fvti.Mask V0),
                                VLOpFrag)),
                (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
                     VR:$merge, fvti.RegClass:$rs1,
                     fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
                     GPR:$vl, fvti.Log2SEW)>;
    }
  }
}

// Sign/zero extension from a fractional-LMUL type (fti) to vti.
multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix,
                          list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fti>.Predicates) in
    def :
      Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
                           (fti.Mask V0), VLOpFrag)),
          (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK")
              (vti.Vector (IMPLICIT_DEF)),
              fti.RegClass:$rs2,
              (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Single width converting

// FP -> integer conversion at the same element width.
multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
                  (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                  (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>;
  }
}

// Same as above, but the pseudo takes a static rounding-mode operand;
// FRM_DYN keeps the dynamic rounding mode.
multiclass VPatConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
                  (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                  (fvti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, ivti.Log2SEW, TA_MA)>;
  }
}


// FP -> integer conversion where the rounding mode comes from the node
// itself as an explicit timm operand.
multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask V0), (XLenVT timm:$frm),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
                  (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                  (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW,
                  TA_MA)>;
  }
}

// Integer -> FP conversion at the same element width; FRM_DYN keeps the
// dynamic rounding mode.
multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                                (ivti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
                  (ivti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// Integer -> FP conversion with an explicit rounding-mode timm operand.
multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                                (ivti.Mask V0), (XLenVT timm:$frm),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
                  (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// Widening converting

// Widening FP -> integer conversion (result is the wide integer type).
multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
                  (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                  (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// Widening FP -> integer conversion with a static rounding-mode operand;
// FRM_DYN keeps the dynamic rounding mode.
multiclass VPatWConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
                  (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                  (fvti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}


// Widening FP -> integer conversion with an explicit rounding-mode timm.
multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask V0), (XLenVT timm:$frm),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
                  (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                  (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// Widening integer -> FP conversion (result is the wide FP type).
multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop,
                                string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def
      : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                              (ivti.Mask V0),
                              VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
                (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
                (ivti.Mask V0),
                GPR:$vl, ivti.Log2SEW, TA_MA)>;
  }
}

// Narrowing converting

// Narrowing FP -> integer conversion (wide FP source, narrow integer result).
multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop,
                                string instruction_name> {
  // Reuse the same list of types used in the widening nodes, but just swap the
  // direction of types around so we're converting from Wti -> Vti
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Narrowing FP -> integer conversion with a static rounding-mode operand;
// FRM_DYN keeps the dynamic rounding mode.
multiclass VPatNConvertFP2IVL_W_RM<SDPatternOperator vop,
                                   string instruction_name> {
  // Reuse the same list of types used in the widening nodes, but just swap the
  // direction of types around so we're converting from Wti -> Vti
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Narrowing FP -> integer conversion with an explicit rounding-mode timm.
multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0), (XLenVT timm:$frm),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Narrowing integer -> FP conversion (wide integer source, narrow FP result);
// FRM_DYN keeps the dynamic rounding mode.
multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop,
                                   string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
                                (iwti.Mask V0),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
                  (iwti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// Narrowing integer -> FP conversion with an explicit rounding-mode timm.
multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
                                (iwti.Mask V0), (XLenVT timm:$frm),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
                  (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// Vector reductions: the scalar accumulator/result lives in an M1 register
// (vti_m1, looked up by element width), the data source is a full vti vector.
multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
  foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
    defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
                                   (vti.Vector vti.RegClass:$rs1), VR:$rs2,
                                   (vti.Mask V0), VLOpFrag,
                                   (XLenVT timm:$policy))),
          (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
              (vti_m1.Vector VR:$merge),
              (vti.Vector vti.RegClass:$rs1),
              (vti_m1.Vector VR:$rs2),
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
    }
  }
}

// Rounding-mode variant of VPatReductionVL; FRM_DYN keeps the dynamic
// rounding mode.
multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float> {
  foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
    defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
                                   (vti.Vector vti.RegClass:$rs1), VR:$rs2,
                                   (vti.Mask V0), VLOpFrag,
                                   (XLenVT timm:$policy))),
          (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
              (vti_m1.Vector VR:$merge),
              (vti.Vector vti.RegClass:$rs1),
              (vti_m1.Vector VR:$rs2),
              (vti.Mask V0),
              // Value to indicate no rounding mode change in
              // RISCVInsertReadWriteCSR
              FRM_DYN,
              GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
    }
  }
}

// Narrowing shift patterns matched through riscv_trunc_vector_vl: a wide op
// whose truncated result selects the "WV"/"WX"/"WI" narrowing pseudos.
multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<
        (vti.Vector
         (riscv_trunc_vector_vl
          (op (wti.Vector wti.RegClass:$rs2),
              (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
          (vti.Mask true_mask),
          VLOpFrag)),
        (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX)
           (vti.Vector (IMPLICIT_DEF)),
           wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<
        (vti.Vector
         (riscv_trunc_vector_vl
          (op (wti.Vector wti.RegClass:$rs2),
              (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
          (vti.Mask true_mask),
          VLOpFrag)),
        (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
           (vti.Vector (IMPLICIT_DEF)),
           wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<
        (vti.Vector
         (riscv_trunc_vector_vl
          (op (wti.Vector wti.RegClass:$rs2),
              (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask),
          VLOpFrag)),
        (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
           (vti.Vector (IMPLICIT_DEF)),
           wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Widening reductions where the source is a plain (non-VL) extension node.
multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
  foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
                                   VR:$rs2, (vti.Mask V0), VLOpFrag,
                                   (XLenVT timm:$policy))),
               (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                  (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
                  (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                  (XLenVT timm:$policy))>;
    }
  }
}

// Rounding-mode variant of VPatWidenReductionVL; FRM_DYN keeps the dynamic
// rounding mode.
multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
  foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
                                   VR:$rs2, (vti.Mask V0), VLOpFrag,
                                   (XLenVT timm:$policy))),
               (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                  (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
                  (wti_m1.Vector VR:$rs2), (vti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, vti.Log2SEW,
                  (XLenVT timm:$policy))>;
    }
  }
}

// Widening reductions where the source extension is itself a VL node
// (extop takes a mask and VL).
multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
  foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
                                   VR:$rs2, (vti.Mask V0), VLOpFrag,
                                   (XLenVT timm:$policy))),
               (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                  (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
                  (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                  (XLenVT timm:$policy))>;
    }
  }
}

// Rounding-mode variant of VPatWidenReductionVL_Ext_VL.
multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
  foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
                                   VR:$rs2, (vti.Mask V0), VLOpFrag,
                                   (XLenVT timm:$policy))),
               (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                  (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
                  (wti_m1.Vector VR:$rs2), (vti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, vti.Log2SEW,
                  (XLenVT timm:$policy))>;
    }
  }
}

// Widening FP binary ops: "VV" and "VF" patterns with a wide result.
multiclass VPatBinaryFPWVL_VV_VF<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                            wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                            vti.ScalarRegClass>;
    }
  }
}

// Rounding-mode variant of VPatBinaryFPWVL_VV_VF.
multiclass VPatBinaryFPWVL_VV_VF_RM<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar
        wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V_RM<vop, instruction_name, "VV",
                              wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                              vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                              vti.RegClass>;
      def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
                               wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                               vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                               vti.ScalarRegClass>;
    }
  }
}

// Extends VPatBinaryFPWVL_VV_VF with the "WV"/"WF" forms (wide first
// operand), including the tied "_TIED" patterns.
multiclass VPatBinaryFPWVL_VV_VF_WV_WF<SDNode vop, SDNode vop_w, string instruction_name>
    : VPatBinaryFPWVL_VV_VF<vop, instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV",
                                      wti.Vector, vti.Vector, vti.Log2SEW,
                                      vti.LMul, wti.RegClass, vti.RegClass>;
      def : VPatBinaryVL_V<vop_w, instruction_name, "WV",
                           wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_VF<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
                            wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                            vti.ScalarRegClass>;
    }
  }
}

// Rounding-mode variant of VPatBinaryFPWVL_VV_VF_WV_WF.
multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM<SDNode vop, SDNode vop_w, string instruction_name>
    : VPatBinaryFPWVL_VV_VF_RM<vop, instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      defm : VPatTiedBinaryNoMaskVL_V_RM<vop_w, instruction_name, "WV",
                                         wti.Vector, vti.Vector, vti.Log2SEW,
                                         vti.LMul, wti.RegClass, vti.RegClass>;
      def : VPatBinaryVL_V_RM<vop_w, instruction_name, "WV",
                              wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                              vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                              vti.RegClass>;
      def : VPatBinaryVL_VF_RM<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
                               wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                               vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                               vti.ScalarRegClass>;
    }
  }
}

// Narrowing shift where the shift amount is an extended scalar splat;
// selects the unmasked "WX" pseudo.
multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (vti.Vector
       (riscv_trunc_vector_vl
        (op (wti.Vector wti.RegClass:$rs2),
            (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))),
                               (vti.Mask true_mask), VLOpFrag)),
        srcvalue, (wti.Mask true_mask), VLOpFrag),
        (vti.Mask true_mask), VLOpFrag)),
      (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
         (vti.Vector (IMPLICIT_DEF)),
         wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Narrowing shift where the per-element shift amount is an extended narrow
// vector; selects the masked "WV" pseudo.
multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (vti.Vector
       (riscv_trunc_vector_vl
        (op (wti.Vector wti.RegClass:$rs2),
            (wti.Vector (extop (vti.Vector vti.RegClass:$rs1),
                               (vti.Mask true_mask), VLOpFrag)),
        srcvalue, (vti.Mask true_mask), VLOpFrag),
        (vti.Mask V0), VLOpFrag)),
      (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK")
         (vti.Vector (IMPLICIT_DEF)),
         wti.RegClass:$rs2, vti.RegClass:$rs1,
         (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Instantiates the WV narrowing-shift patterns for both sign- and
// zero-extended shift amounts.
multiclass VPatNarrowShiftVL_WV<SDNode op, string instruction_name> {
  defm : VPatNarrowShiftExtVL_WV<op, riscv_sext_vl_oneuse, instruction_name>;
  defm : VPatNarrowShiftExtVL_WV<op, riscv_zext_vl_oneuse, instruction_name>;
}

multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // NOTE: We choose VMADD because it has the most commuting freedom. So it
      // works best with how TwoAddressInstructionPass tries commuting.
      def : Pat<(vti.Vector
                 (op vti.RegClass:$rs2,
                     (riscv_mul_vl_oneuse vti.RegClass:$rs1,
                                          vti.RegClass:$rd,
                                          srcvalue, (vti.Mask true_mask), VLOpFrag),
                     srcvalue, (vti.Mask true_mask), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
      // commutable.
      def : Pat<(vti.Vector
                  (op vti.RegClass:$rs2,
                      (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rd,
                                           srcvalue, (vti.Mask true_mask), VLOpFrag),
                      srcvalue, (vti.Mask true_mask), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Masked integer multiply-accumulate formed from vmerge(mask, op, rd):
// elements where the mask is clear keep $rd, which maps onto the _MASK
// pseudo's tied destination.  Merge-into-$rd uses TU_MU; merge-into-undef
// uses TAIL_AGNOSTIC.
multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (op vti.RegClass:$rd,
                    (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                    srcvalue, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (op vti.RegClass:$rd,
                    (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                    srcvalue, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (op vti.RegClass:$rd,
                    (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                    srcvalue, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (op vti.RegClass:$rd,
                    (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                    srcvalue, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Widening multiply-accumulate (vwmacc family): narrow x narrow accumulated
// into the wide destination, VV and VX forms, always masked via V0.
multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1),
                           (vti.Vector vti.RegClass:$rs2),
                           (wti.Vector wti.RegClass:$rd),
                           (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK")
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1),
                           (vti.Vector vti.RegClass:$rs2),
                           (wti.Vector wti.RegClass:$rd),
                           (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK")
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1,
                   vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
    }
  }
}

// trunc(shift(wide, splat)) -> narrowing shift with scalar (WX) or 5-bit
// immediate (WI) shift amount.
multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vti.Vector (riscv_trunc_vector_vl
                  (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                                  srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vti.Vector (riscv_trunc_vector_vl
                  (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                                  srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Masked FP fused multiply-add where the destination is tied to the addend
// ($rd), VV and V<scalar> forms; no rounding-mode operand on these pseudos.
multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                                 vti.RegClass:$rs2, (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                                 vti.RegClass:$rd, vti.RegClass:$rs2,
                                 (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Same as VPatFPMulAddVL_VV_VF, for pseudos that take a rounding-mode
// operand (FRM_DYN is passed through).
multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                                 vti.RegClass:$rs2, (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                                 vti.RegClass:$rd, vti.RegClass:$rs2,
                                 (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Masked FP multiply-accumulate recognized through vmerge(mask, fma, rd),
// analogous to VPatMultiplyAccVL_VV_VX: merge-into-$rd -> TU_MU,
// merge-into-undef -> TAIL_AGNOSTIC.
multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Rounding-mode-carrying variant of VPatFPMulAccVL_VV_VF.
multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                  (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                    vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                  vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Widening FP multiply-accumulate: narrow x narrow into wide $rd, VV and
// V<scalar> forms; pseudos without a rounding-mode operand.
multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Rounding-mode-carrying variant of VPatWidenFPMulAccVL_VV_VF.
multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// vslideup/vslidedown with immediate (VI) or scalar (VX) offset; carries a
// tail/mask policy operand through to the pseudo.
multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> {
  foreach vti = AllVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
                                 (vti.Vector vti.RegClass:$rs1),
                                 uimm5:$rs2, (vti.Mask V0),
                                 VLOpFrag, (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   (XLenVT timm:$policy))>;

      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
                                 (vti.Vector vti.RegClass:$rs1),
                                 GPR:$rs2, (vti.Mask V0),
                                 VLOpFrag, (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   (XLenVT timm:$policy))>;
    }
  }
}

// vslide1up/vslide1down with a GPR inserted element (integer vectors).
multiclass VPatSlide1VL_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
                                 (vti.Vector vti.RegClass:$rs1),
                                 GPR:$rs2, (vti.Mask V0), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
    }
  }
}

// vfslide1up/vfslide1down with an FP scalar inserted element.
multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
                                 (vti.Vector vti.RegClass:$rs1),
                                 vti.Scalar:$rs2, (vti.Mask V0), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
    }
  }
}

// Unsigned averaging add (vaaddu) with a fixed vxrm rounding-mode operand:
// vxrm=0b10 (RDN) gives floor-average, vxrm=0b00 (RNU) gives ceil-average.
multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                     vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
                   (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                            (vti.Vector vti.RegClass:$rs1),
                            vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                            (vti.Vector vti.RegClass:$rs1),
                            vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
                 vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
// 11.2. Vector Widening Integer Add/Subtract
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">;

// shl_vl (ext_vl v, splat 1) is a special case of widening add: select
// vwadd(u) v, v instead of a shift of the extended value.
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse
                              (vti.Vector vti.RegClass:$rs1),
                              (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (riscv_vmv_v_x_vl
                              (wti.Vector undef), 1, VLOpFrag)),
                            wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
                              (vti.Vector vti.RegClass:$rs1),
                              (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (riscv_vmv_v_x_vl
                              (wti.Vector undef), 1, VLOpFrag)),
                            wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
// 11.3. Vector Integer Extension
// One instantiation per supported extension factor (vzext/vsext .vf2/.vf4/.vf8).
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8",
                      AllFractionableVF8IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8",
                      AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">;
defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">;
defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
                          (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)),
                          srcvalue, (vti.Mask true_mask), VLOpFrag),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
               (vti.Vector (IMPLICIT_DEF)),
               vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
}
// 11.7. Vector Narrowing Integer Right Shift Instructions
defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">;
defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">;

defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">;
defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">;

defm : VPatNarrowShiftVL_WV<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftVL_WV<riscv_sra_vl, "PseudoVNSRA">;

defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">;

// A plain truncate is a narrowing shift right by 0.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in
  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                               (vti.Mask V0),
                                               VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
               (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
// 11.8. Vector Integer Comparison Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
    defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;

    // There is no vmsgt(u).vv/vmsge(u).vv: swap the operands and use the
    // inverse condition instead.
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
    // There is no VMSGE(U)_VX instruction

    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;

    // Strict compares against an immediate become non-strict compares against
    // the immediate plus one (e.g. x < imm  ==>  x <= imm-1 form).
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT,
                                                SplatPat_simm5_plus1>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT,
                                                SplatPat_simm5_plus1_nonzero>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE,
                                                SplatPat_simm5_plus1>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE,
                                                SplatPat_simm5_plus1_nonzero>;
  }
} // foreach vti = AllIntegerVectors

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", IntegerVectorsExceptI64>;
// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", I64IntegerVectors>;
}

// 11.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>;
// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">;
defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">;
defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">;
defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmacc_vl, "PseudoVWMACC">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccu_vl, "PseudoVWMACCU">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccsu_vl, "PseudoVWMACCSU">;
// vwmaccsu with the splat on the signed operand maps to vwmaccus.vx (operands
// exchanged relative to the node).
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in
  def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1),
                               (SplatPat XLenVT:$rs2),
                               (wti.Vector wti.RegClass:$rd),
                               (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK")
               wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
// 11.15. Vector Integer Merge Instructions
// Note the pseudo operand order: (merge, false-operand $rs2, true-operand).
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           vti.RegClass:$rs1,
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$merge,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$merge,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           (SplatPat_simm5 simm5:$rs1),
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$merge,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 vti.RegClass:$merge, vti.RegClass:$rs2, simm5:$rs1,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}
// 11.16. Vector Integer Move Instructions
foreach vti = AllVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru,
                                            vti.RegClass:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
                 vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
  }
}

// Integer-only splat moves (vmv.v.x / vmv.v.i).  This loop was previously
// left unbalanced so it nested inside the AllVectors loop above (shadowing
// the iterator and re-instantiating these defs once per AllVectors entry)
// and its patterns carried no Predicates guard; both are fixed here.
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
                 vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
    // SEW-aware simm5 check so the immediate is legal after element truncation.
    defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
    def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5),
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
                 vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>;
  }
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;

// 12.2. Vector Single-Width Averaging Add and Subtract
// vxrm=0b10 (RDN) -> floor average, vxrm=0b00 (RNU) -> ceil average.
defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10>;
defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00>;
// Vector Narrowing Fixed-Point Clip Instructions
// Matches a (trunc (min (max x))) / (trunc (max (min x))) tree built from
// VL nodes against a single vnclip/vnclipu with shift amount 0. op1/op2 are
// the min/max nodes in either order; op1_value/op2_value are the splatted
// clamp bounds the pattern requires.
class VPatTruncSatClipMaxMinBase<string inst,
                                 VTypeInfo vti,
                                 VTypeInfo wti,
                                 SDPatternOperator op1,
                                 int op1_value,
                                 SDPatternOperator op2,
                                 int op2_value> :
  Pat<(vti.Vector (riscv_trunc_vector_vl
       (wti.Vector (op1
         (wti.Vector (op2
           (wti.Vector wti.RegClass:$rs1),
           (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), op2_value, (XLenVT srcvalue))),
           (wti.Vector undef),(wti.Mask V0), VLOpFrag)),
         (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), op1_value, (XLenVT srcvalue))),
         (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
       (vti.Mask V0), VLOpFrag)),
      (!cast<Instruction>(inst#"_WI_"#vti.LMul.MX#"_MASK")
       (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
       (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>;

// Unsigned clip: (trunc (umin x, 2^SEW-1)) --> vnclipu with shift 0.
class VPatTruncSatClipUMin<VTypeInfo vti,
                           VTypeInfo wti,
                           int uminval> :
  Pat<(vti.Vector (riscv_trunc_vector_vl
       (wti.Vector (riscv_umin_vl
         (wti.Vector wti.RegClass:$rs1),
         (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), uminval, (XLenVT srcvalue))),
         (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
       (vti.Mask V0), VLOpFrag)),
      (!cast<Instruction>("PseudoVNCLIPU_WI_"#vti.LMul.MX#"_MASK")
       (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
       (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>;

// min/max commute, so instantiate the base pattern in both nesting orders.
multiclass VPatTruncSatClipMaxMin<string inst, VTypeInfo vti, VTypeInfo wti,
                                  SDPatternOperator max, int maxval, SDPatternOperator min, int minval> {
  def : VPatTruncSatClipMaxMinBase<inst, vti, wti, max, maxval, min, minval>;
  def : VPatTruncSatClipMaxMinBase<inst, vti, wti, min, minval, max, maxval>;
}

multiclass VPatTruncSatClip<VTypeInfo vti, VTypeInfo wti> {
  defvar sew = vti.SEW;
  // Saturation bounds of the *narrow* element type:
  // uminval = 2^sew - 1, sminval = 2^(sew-1) - 1, smaxval = -2^(sew-1).
  defvar uminval = !sub(!shl(1, sew), 1);
  defvar sminval = !sub(!shl(1, !sub(sew, 1)), 1);
  defvar smaxval = !sub(0, !shl(1, !sub(sew, 1)));

  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    defm : VPatTruncSatClipMaxMin<"PseudoVNCLIP", vti, wti, riscv_smin_vl,
                                  sminval, riscv_smax_vl, smaxval>;
    def : VPatTruncSatClipUMin<vti, wti, uminval>;
  }

}

foreach vtiToWti = AllWidenableIntVectors in
  defm : VPatTruncSatClip<vtiToWti.Vti, vtiToWti.Wti>;

// 13. Vector Floating-Point Instructions

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD">;
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fsub_vl, "PseudoVFSUB">;
defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fsub_vl, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwadd_vl, riscv_vfwadd_w_vl, "PseudoVFWADD">;
defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwsub_vl, riscv_vfwsub_w_vl, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fmul_vl, "PseudoVFMUL">;
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fdiv_vl, "PseudoVFDIV", isSEWAware=1>;
defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fdiv_vl, "PseudoVFRDIV", isSEWAware=1>;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatBinaryFPWVL_VV_VF_RM<riscv_vfwmul_vl, "PseudoVFWMUL">;

// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
// vfmadd/vfmsub/vfnmadd/vfnmsub select based on which operand is the
// addend; the *_oneuse variants below fold into the accumulator forms
// (vfmacc etc.) when the multiply result has a single use.
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmadd_vl, "PseudoVFMADD">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmsub_vl, "PseudoVFMSUB">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmadd_vl, "PseudoVFNMADD">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmsub_vl, "PseudoVFNMSUB">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">;

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmadd_vl, "PseudoVFWNMACC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmsub_vl, "PseudoVFWMSAC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmsub_vl, "PseudoVFWNMSAC">;

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_vfmin_vl, "PseudoVFMIN">;
defm : VPatBinaryFPVL_VV_VF<riscv_vfmax_vl, "PseudoVFMAX">;

// 13.13.
// Vector Floating-Point Compare Instructions
// Each defm supplies the instruction for the operands-as-written order and
// for the swapped (scalar-first) order; GT/GE are matched by swapping LT/LE.
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETEQ,
                              "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETOEQ,
                              "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETNE,
                              "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETUNE,
                              "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLT,
                              "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLT,
                              "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLE,
                              "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE,
                              "PseudoVMFLE", "PseudoVMFGE">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TA_MA)>;

    // 13.12. Vector Floating-Point Sign-Injection Instructions
    // fabs is vfsgnjx.vv with the same register for both operands.
    def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                             VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                             VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TA_MA)>;

    // copysign(x, y) --> vfsgnj.vv, preserving the merge operand.
    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (vti.Vector vti.RegClass:$rs2),
                                  vti.RegClass:$merge,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1,
                   vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    // copysign(x, -y) folds to a single vfsgnjn.vv when both nodes are
    // unmasked (true_mask) over the same VL.
    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (riscv_fneg_vl vti.RegClass:$rs2,
                                                 (vti.Mask true_mask),
                                                 VLOpFrag),
                                  srcvalue,
                                  (vti.Mask true_mask),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;

    // copysign with a scalar splat sign source --> vfsgnj.vf.
    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (SplatFPOp vti.ScalarRegClass:$rs2),
                                  vti.RegClass:$merge,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1,
                   vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    // Rounding without exception to implement nearbyint.
    def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
                                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

    // 14.14.
    // Vector Floating-Point Classify Instruction
    def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
                               (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

foreach fvti = AllFloatVectors in {
  // Floating-point vselects:
  // 11.15. Vector Integer Merge Instructions
  // 13.15. Vector Floating-Point Merge Instruction
  // These use vmerge (an integer instruction) on FP vectors, so they only
  // need the predicates of the same-sized *integer* type.
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                   fvti.RegClass:$merge, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;

    // FP constant selectable through an integer register --> vmerge.vxm.
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
                   fvti.RegClass:$merge, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;

    // Splat of +0.0 --> vmerge.vim with immediate 0.
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                   fvti.RegClass:$merge, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;
  }

  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    // General scalar FP operand --> vfmerge.vfm (needs the FP predicates).
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                   fvti.RegClass:$merge, fvti.RegClass:$rs2,
                   (fvti.Scalar fvti.ScalarRegClass:$rs1),
                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

    // 13.16. Vector Floating-Point Move Instruction
    // If we're splatting fpimm0, use vmv.v.x vd, x0.
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                   $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>;
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru, (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
                   $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>;

    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                  fvti.LMul.MX)
                   $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   GPR:$vl, fvti.Log2SEW, TU_MU)>;
  }
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">;
defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">;
defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">;
defm : VPatConvertFP2I_RM_VL_V<any_riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">;

defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;

defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;

defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">;
defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">;

// 13.18.
// Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">;

defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;

defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;

// vfwcvt.f.f.v (fpextend). For f16 sources only Zvfhmin is required, hence
// the predicate override below.
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fwti.Vector (any_riscv_fpextend_vl
                             (fvti.Vector fvti.RegClass:$rs1),
                             (fvti.Mask V0),
                             VLOpFrag)),
            (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
                 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                 (fvti.Mask V0),
                 GPR:$vl, fvti.Log2SEW, TA_MA)>;
}

// 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">;

defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;

defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;

defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  // Define vfwcvt.f.f.v for f16 when Zvfhmin is enabled.
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in {
    def : Pat<(fvti.Vector (any_riscv_fpround_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                   (fwti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, fvti.Log2SEW, TA_MA)>;

    // vfncvt.rod requires full Zvfh support; the inner `let` overrides the
    // Zvfhmin-only predicate list above.
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                   (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// 14. Vector Reduction Operations

// 14.1.
// Vector Single-Width Integer Reduction Instructions
defm : VPatReductionVL<rvv_vecreduce_ADD_vl,  "PseudoVREDSUM", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_AND_vl,  "PseudoVREDAND", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_OR_vl,   "PseudoVREDOR", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_XOR_vl,  "PseudoVREDXOR", is_float=0>;

// 14.2. Vector Widening Integer Reduction Instructions
// Both plain DAG extends and VL-node extends are matched; zext/anyext go to
// the unsigned sum, sext to the signed sum.
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", is_float=0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", is_float=0>;

// 14.3. Vector Single-Width Floating-Point Reduction Instructions
// SEQ_FADD (ordered) selects the ordered-sum instruction.
defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", is_float=1>;
defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", is_float=1>;
defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", is_float=1>;
defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", is_float=1>;

// 14.4.
// Vector Widening Floating-Point Reduction Instructions
defm : VPatWidenReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse,
                               "PseudoVFWREDOSUM", is_float=1>;
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl,
                                      riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDOSUM", is_float=1>;
defm : VPatWidenReductionVL_RM<rvv_vecreduce_FADD_vl, fpext_oneuse,
                               "PseudoVFWREDUSUM", is_float=1>;
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl,
                                      riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDUSUM", is_float=1>;

// 15. Vector Mask Instructions

foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    // 15.1 Vector Mask-Register Logical Instructions
    def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Fold a negated operand into vmandn/vmorn.
    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                        (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                       (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    // XOR is associative so we need 2 patterns for VMXNOR.
    def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                        VLOpFrag),
                                        VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // not(op(x, y)) forms: vmnand/vmnor/vmxnor.
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                       VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Match the not idiom to the vmnot.m pseudo.
    def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

    // 15.2 Vector count population in mask vcpop.m
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

    // 15.3 vfirst find-first-set mask bit
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}

// 16. Vector Permutation Instructions

// 16.1. Integer Scalar Move Instructions
// vmv.s.x only ever writes element 0, so only the no-group (LMUL<=1) types
// are needed.
foreach vti = NoGroupIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                            vti.ScalarRegClass:$rs1,
                                            VLOpFrag)),
              (PseudoVMV_S_X $merge, vti.ScalarRegClass:$rs1, GPR:$vl,
                             vti.Log2SEW)>;
  }
}

// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                                vti.RegClass:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                                uimm5:$imm,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // vrgatherei16: the i16 index vector has its own EMUL.
  // emul = lmul * 16 / sew
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 16.2. Floating-Point Scalar Move Instructions
foreach vti = NoGroupFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // +0.0 and integer-representable FP immediates can use vmv.s.x.
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (fpimm0)),
                                             VLOpFrag)),
              (PseudoVMV_S_X $merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
                                             VLOpFrag)),
              (PseudoVMV_S_X $merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
  }
}

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             vti.ScalarRegClass:$rs1,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   vti.RegClass:$merge,
                   (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  }
  // FP gathers reuse the integer vrgather instructions; require both the FP
  // and equally-sized integer type predicates.
  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<ivti>.Predicates) in {
    def : Pat<(vti.Vector
               (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                     (ivti.Vector vti.RegClass:$rs1),
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector
               (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                     uimm5:$imm,
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // vrgatherei16 for FP data types; same EMUL computation as the integer
  // loop above.
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;

// Slide profiles: result, src-merge, src, offset, mask, vl (+ policy for
// the full slide form).
def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;
def SDTRVVFSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>,
  SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
def riscv_slidedown_vl : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;
def riscv_fslide1up_vl : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>;
def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>;

foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
  }
}

defm : VPatSlideVL_VX_VI<riscv_slideup_vl, "PseudoVSLIDEUP">;
defm : VPatSlideVL_VX_VI<riscv_slidedown_vl, "PseudoVSLIDEDOWN">;
defm : VPatSlide1VL_VX<riscv_slide1up_vl, "PseudoVSLIDE1UP">;
defm : VPatSlide1VL_VF<riscv_fslide1up_vl, "PseudoVFSLIDE1UP">;
defm : VPatSlide1VL_VX<riscv_slide1down_vl, "PseudoVSLIDE1DOWN">;
defm : VPatSlide1VL_VF<riscv_fslide1down_vl, "PseudoVFSLIDE1DOWN">;