1//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file contains the required infrastructure and VL patterns to 10/// support code generation for the standard 'V' (Vector) extension, version 11/// version 1.0. 12/// 13/// This file is included from and depends upon RISCVInstrInfoVPseudos.td 14/// 15/// Note: the patterns for RVV intrinsics are found in 16/// RISCVInstrInfoVPseudos.td. 17/// 18//===----------------------------------------------------------------------===// 19 20//===----------------------------------------------------------------------===// 21// Helpers to define the VL patterns. 22//===----------------------------------------------------------------------===// 23 24def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, 25 SDTCisSameAs<0, 2>, 26 SDTCisVec<0>, SDTCisInt<0>, 27 SDTCVecEltisVT<3, i1>, 28 SDTCisSameNumEltsAs<0, 3>, 29 SDTCisVT<4, XLenVT>]>; 30 31def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 32 SDTCisSameAs<0, 2>, 33 SDTCisVec<0>, SDTCisInt<0>, 34 SDTCisSameAs<0, 3>, 35 SDTCVecEltisVT<4, i1>, 36 SDTCisSameNumEltsAs<0, 4>, 37 SDTCisVT<5, XLenVT>]>; 38 39def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 40 SDTCisVec<0>, SDTCisFP<0>, 41 SDTCVecEltisVT<2, i1>, 42 SDTCisSameNumEltsAs<0, 2>, 43 SDTCisVT<3, XLenVT>]>; 44def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 45 SDTCisSameAs<0, 2>, 46 SDTCisVec<0>, SDTCisFP<0>, 47 SDTCisSameAs<0, 3>, 48 SDTCVecEltisVT<4, i1>, 49 SDTCisSameNumEltsAs<0, 4>, 50 SDTCisVT<5, XLenVT>]>; 51 52def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 53 SDTCisSameAs<0, 2>, 54 SDTCisVec<0>, SDTCisFP<0>, 55 SDTCisSameAs<0, 3>, 56 SDTCVecEltisVT<4, i1>, 57 SDTCisSameNumEltsAs<0, 4>, 58 SDTCisVT<5, XLenVT>]>; 59 60def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL", 61 SDTypeProfile<1, 3, [SDTCisVec<0>, 62 SDTCisSameAs<0, 1>, 63 SDTCisSameAs<0, 2>, 64 SDTCisVT<3, XLenVT>]>>; 65def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL", 66 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, 67 SDTCisSameAs<0, 1>, 68 SDTCisVT<2, XLenVT>, 69 SDTCisVT<3, XLenVT>]>>; 70def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL", 71 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, 72 SDTCisSameAs<0, 1>, 73 SDTCisEltOfVec<2, 0>, 74 SDTCisVT<3, XLenVT>]>>; 75def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL", 76 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 77 SDTCisInt<0>, 78 SDTCisVT<2, XLenVT>, 79 SDTCisVT<3, XLenVT>]>>; 80def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL", 81 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 82 SDTCisFP<0>, 83 SDTCisEltOfVec<2, 0>, 84 SDTCisVT<3, XLenVT>]>>; 85 86def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 87def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>; 88def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 89def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 90def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 91def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 92def riscv_or_vl : 
SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 93def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 94def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>; 95def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>; 96def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>; 97def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>; 98def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>; 99def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>; 100def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>; 101def riscv_rotl_vl : SDNode<"RISCVISD::ROTL_VL", SDT_RISCVIntBinOp_VL>; 102def riscv_rotr_vl : SDNode<"RISCVISD::ROTR_VL", SDT_RISCVIntBinOp_VL>; 103def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 104def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 105def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 106def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 107 108def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>; 109def riscv_bswap_vl : SDNode<"RISCVISD::BSWAP_VL", SDT_RISCVIntUnOp_VL>; 110def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>; 111def riscv_cttz_vl : SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>; 112def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>; 113 114def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 115def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 116def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 117def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; 118def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>; 119 120def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 121def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>; 122def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 123def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>; 124def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>; 125def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>; 126def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>; 127def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; 128def riscv_vfmin_vl : SDNode<"RISCVISD::VFMIN_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 129def riscv_vfmax_vl : SDNode<"RISCVISD::VFMAX_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 130 131def riscv_strict_fadd_vl : SDNode<"RISCVISD::STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; 132def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; 133def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; 134def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; 135def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; 136 137def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 138 [(riscv_fadd_vl node:$lhs, 
node:$rhs, node:$merge, node:$mask, node:$vl), 139 (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 140def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 141 [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 142 (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 143def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 144 [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 145 (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 146def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 147 [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 148 (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 149def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 150 [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl), 151 (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>; 152 153def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL", 154 SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>, 155 SDTCisFP<1>, SDTCisVec<1>, 156 SDTCisSameSizeAs<0, 1>, 157 SDTCisSameNumEltsAs<0, 1>, 158 SDTCVecEltisVT<2, i1>, 159 SDTCisSameNumEltsAs<0, 2>, 160 SDTCisVT<3, XLenVT>]>>; 161 162def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 163 SDTCisSameAs<0, 2>, 164 SDTCisSameAs<0, 3>, 165 SDTCisVec<0>, SDTCisFP<0>, 166 SDTCVecEltisVT<4, i1>, 167 SDTCisSameNumEltsAs<0, 4>, 168 SDTCisVT<5, XLenVT>]>; 169def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 170def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 171def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 172def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 173 174def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 175 SDTCisVec<1>, SDTCisFP<1>, 176 SDTCisOpSmallerThanOp<1, 0>, 177 SDTCisSameNumEltsAs<0, 1>, 178 SDTCisSameAs<1, 2>, 179 SDTCisSameAs<0, 3>, 180 SDTCVecEltisVT<4, i1>, 181 SDTCisSameNumEltsAs<0, 4>, 182 SDTCisVT<5, XLenVT>]>; 183def riscv_vfwmadd_vl : SDNode<"RISCVISD::VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 184def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 185def riscv_vfwmsub_vl : SDNode<"RISCVISD::VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 186def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 187 188def riscv_strict_vfmadd_vl : SDNode<"RISCVISD::STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 189def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 190def riscv_strict_vfmsub_vl : SDNode<"RISCVISD::STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 191def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 192 193def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 194 [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 195 (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 196def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, 
node:$rs2, node:$rs3, node:$mask, node:$vl), 197 [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 198 (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 199def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 200 [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 201 (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 202def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 203 [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 204 (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 205 206def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [ 207 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>, 208 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 209]>; 210def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [ 211 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, 212 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 213]>; 214 215def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; 216def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; 217def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; 218def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>; 219def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; 220def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; 221 222def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 223 [(riscv_fpround_vl node:$src, node:$mask, node:$vl), 224 (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>; 225def any_riscv_fpextend_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 226 [(riscv_fpextend_vl node:$src, node:$mask, node:$vl), 227 (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>; 228def any_riscv_fncvt_rod_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 229 [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl), 230 (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>; 231 232def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [ 233 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 234 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 235]>; 236def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [ 237 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 238 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 239 SDTCisVT<4, XLenVT> // Rounding mode 240]>; 241 242def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [ 243 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 244 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 245]>; 246def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [ 247 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 248 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 249 SDTCisVT<4, XLenVT> // Rounding mode 250]>; 251 252def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [ 253 SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, 254 SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>, 255 SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>; 256 257// Float -> Int 258def riscv_vfcvt_xu_f_vl : 
SDNode<"RISCVISD::VFCVT_XU_F_VL", SDT_RISCVFP2IOp_VL>; 259def riscv_vfcvt_x_f_vl : SDNode<"RISCVISD::VFCVT_X_F_VL", SDT_RISCVFP2IOp_VL>; 260def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>; 261def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>; 262 263def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>; 264def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>; 265 266def riscv_strict_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>; 267def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; 268def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; 269 270def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm), 271 [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm), 272 (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>; 273def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 274 [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl), 275 (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>; 276def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 277 [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl), 278 (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>; 279 280// Int -> Float 281def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 282def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 283def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>; 284def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>; 285 286def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; 287def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; 288 289def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 290 [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl), 291 (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>; 292def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 293 [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl), 294 (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>; 295 296def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>; 297def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; 298 299def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 300 [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl), 301 (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>; 302 303def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>; 304def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; 305def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; 306def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 307 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 308 
(riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>; 309def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 310 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 311 (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>; 312 313def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL", 314 SDTypeProfile<1, 5, [SDTCisVec<0>, 315 SDTCisSameAs<0, 1>, 316 SDTCisVT<2, XLenVT>, 317 SDTCisSameAs<0, 3>, 318 SDTCVecEltisVT<4, i1>, 319 SDTCisSameNumEltsAs<0, 4>, 320 SDTCisVT<5, XLenVT>]>>; 321def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL", 322 SDTypeProfile<1, 5, [SDTCisVec<0>, 323 SDTCisSameAs<0, 1>, 324 SDTCisInt<2>, 325 SDTCisSameNumEltsAs<0, 2>, 326 SDTCisSameSizeAs<0, 2>, 327 SDTCisSameAs<0, 3>, 328 SDTCVecEltisVT<4, i1>, 329 SDTCisSameNumEltsAs<0, 4>, 330 SDTCisVT<5, XLenVT>]>>; 331def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL", 332 SDTypeProfile<1, 5, [SDTCisVec<0>, 333 SDTCisSameAs<0, 1>, 334 SDTCisInt<2>, 335 SDTCVecEltisVT<2, i16>, 336 SDTCisSameNumEltsAs<0, 2>, 337 SDTCisSameAs<0, 3>, 338 SDTCVecEltisVT<4, i1>, 339 SDTCisSameNumEltsAs<0, 4>, 340 SDTCisVT<5, XLenVT>]>>; 341 342def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [ 343 SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>, 344 SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameAs<0, 4>, 345 SDTCisVT<5, XLenVT> 346]>; 347 348def riscv_vmerge_vl : SDNode<"RISCVISD::VMERGE_VL", SDT_RISCVVMERGE_VL>; 349 350def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, 351 SDTCisVT<1, XLenVT>]>; 352def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>; 353def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>; 354 355def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 356 SDTCisSameAs<0, 2>, 357 SDTCVecEltisVT<0, i1>, 358 SDTCisVT<3, XLenVT>]>; 359def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 360def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 361def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 362 363def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>; 364 365def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), 366 (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; 367 368def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL", 369 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 370 SDTCisVec<1>, SDTCisInt<1>, 371 SDTCVecEltisVT<2, i1>, 372 SDTCisSameNumEltsAs<1, 2>, 373 SDTCisVT<3, XLenVT>]>>; 374 375def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL", 376 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 377 SDTCisVec<1>, SDTCisInt<1>, 378 SDTCVecEltisVT<2, i1>, 379 SDTCisSameNumEltsAs<1, 2>, 380 SDTCisVT<3, XLenVT>]>>; 381 382def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, 383 SDTCisSameNumEltsAs<0, 1>, 384 SDTCisSameNumEltsAs<1, 2>, 385 SDTCVecEltisVT<2, i1>, 386 SDTCisVT<3, XLenVT>]>; 387def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>; 388def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>; 389 390def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL", 391 SDTypeProfile<1, 3, [SDTCisVec<0>, 392 SDTCisSameNumEltsAs<0, 1>, 393 SDTCisSameNumEltsAs<0, 2>, 394 SDTCVecEltisVT<2, i1>, 395 SDTCisVT<3, XLenVT>]>>; 396 397def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, 
[SDTCisVec<0>, SDTCisInt<0>, 398 SDTCisInt<1>, 399 SDTCisSameNumEltsAs<0, 1>, 400 SDTCisOpSmallerThanOp<1, 0>, 401 SDTCisSameAs<1, 2>, 402 SDTCisSameAs<0, 3>, 403 SDTCisSameNumEltsAs<1, 4>, 404 SDTCVecEltisVT<4, i1>, 405 SDTCisVT<5, XLenVT>]>; 406def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 407def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 408def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>; 409def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 410def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 411def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWIntBinOp_VL, []>; 412def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWIntBinOp_VL, []>; 413def riscv_vwsll_vl : SDNode<"RISCVISD::VWSLL_VL", SDT_RISCVVWIntBinOp_VL, []>; 414 415def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 416 SDTCisInt<1>, 417 SDTCisSameNumEltsAs<0, 1>, 418 SDTCisOpSmallerThanOp<1, 0>, 419 SDTCisSameAs<1, 2>, 420 SDTCisSameAs<0, 3>, 421 SDTCisSameNumEltsAs<1, 4>, 422 SDTCVecEltisVT<4, i1>, 423 SDTCisVT<5, XLenVT>]>; 424def riscv_vwmacc_vl : SDNode<"RISCVISD::VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; 425def riscv_vwmaccu_vl : SDNode<"RISCVISD::VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; 426def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>; 427 428def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 429 SDTCisFP<1>, 430 SDTCisSameNumEltsAs<0, 1>, 431 SDTCisOpSmallerThanOp<1, 0>, 432 SDTCisSameAs<1, 2>, 433 SDTCisSameAs<0, 3>, 434 SDTCisSameNumEltsAs<1, 4>, 435 SDTCVecEltisVT<4, i1>, 436 SDTCisVT<5, XLenVT>]>; 437def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; 438def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; 439def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>; 440 441def SDT_RISCVVNIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 442 SDTCisInt<1>, 443 SDTCisSameNumEltsAs<0, 1>, 444 SDTCisOpSmallerThanOp<0, 1>, 445 SDTCisSameAs<0, 2>, 446 SDTCisSameAs<0, 3>, 447 SDTCisSameNumEltsAs<0, 4>, 448 SDTCVecEltisVT<4, i1>, 449 SDTCisVT<5, XLenVT>]>; 450def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNIntBinOp_VL>; 451 452def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 453 SDTCisSameAs<0, 1>, 454 SDTCisInt<2>, 455 SDTCisSameNumEltsAs<1, 2>, 456 SDTCisOpSmallerThanOp<2, 1>, 457 SDTCisSameAs<0, 3>, 458 SDTCisSameNumEltsAs<1, 4>, 459 SDTCVecEltisVT<4, i1>, 460 SDTCisVT<5, XLenVT>]>; 461def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>; 462def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>; 463def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>; 464def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>; 465 466def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 467 SDTCisSameAs<0, 1>, 468 SDTCisFP<2>, 469 SDTCisSameNumEltsAs<1, 2>, 470 SDTCisOpSmallerThanOp<2, 1>, 471 SDTCisSameAs<0, 3>, 472 SDTCisSameNumEltsAs<1, 4>, 473 SDTCVecEltisVT<4, i1>, 474 SDTCisVT<5, XLenVT>]>; 475 476def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>; 477def 
riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>; 478 479def SDTRVVVecReduce : SDTypeProfile<1, 6, [ 480 SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>, 481 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>, 482 SDTCisVT<6, XLenVT> 483]>; 484 485def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 486 node:$E), 487 (riscv_add_vl node:$A, node:$B, node:$C, 488 node:$D, node:$E), [{ 489 return N->hasOneUse(); 490}]>; 491 492def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 493 node:$E), 494 (riscv_sub_vl node:$A, node:$B, node:$C, 495 node:$D, node:$E), [{ 496 return N->hasOneUse(); 497}]>; 498 499def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 500 node:$E), 501 (riscv_mul_vl node:$A, node:$B, node:$C, 502 node:$D, node:$E), [{ 503 return N->hasOneUse(); 504}]>; 505 506def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 507 node:$E), 508 (riscv_vwmul_vl node:$A, node:$B, node:$C, 509 node:$D, node:$E), [{ 510 return N->hasOneUse(); 511}]>; 512 513def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 514 node:$E), 515 (riscv_vwmulu_vl node:$A, node:$B, node:$C, 516 node:$D, node:$E), [{ 517 return N->hasOneUse(); 518}]>; 519 520def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 521 node:$E), 522 (riscv_vwmulsu_vl node:$A, node:$B, node:$C, 523 node:$D, node:$E), [{ 524 return N->hasOneUse(); 525}]>; 526 527def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 528 (riscv_sext_vl node:$A, node:$B, node:$C), [{ 529 return N->hasOneUse(); 530}]>; 531 532def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 533 (riscv_zext_vl node:$A, node:$B, node:$C), [{ 534 return N->hasOneUse(); 535}]>; 536 537def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 538 (riscv_fpextend_vl node:$A, node:$B, node:$C), [{ 539 return N->hasOneUse(); 540}]>; 541 542def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 543 node:$E), 544 (riscv_vfmadd_vl node:$A, node:$B, 545 node:$C, node:$D, node:$E), [{ 546 return N->hasOneUse(); 547}]>; 548 549def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 550 node:$E), 551 (riscv_vfnmadd_vl node:$A, node:$B, 552 node:$C, node:$D, node:$E), [{ 553 return N->hasOneUse(); 554}]>; 555 556def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 557 node:$E), 558 (riscv_vfmsub_vl node:$A, node:$B, 559 node:$C, node:$D, node:$E), [{ 560 return N->hasOneUse(); 561}]>; 562 563def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 564 node:$E), 565 (riscv_vfnmsub_vl node:$A, node:$B, 566 node:$C, node:$D, node:$E), [{ 567 return N->hasOneUse(); 568}]>; 569 570foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR", 571 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in 572 def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>; 573 574// Give explicit Complexity to prefer simm5/uimm5. 
575def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>; 576def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>; 577def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>; 578def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>; 579def SplatPat_simm5_plus1 580 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>; 581def SplatPat_simm5_plus1_nonzero 582 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>; 583 584// Selects extends or truncates of splats where we only care about the lowest 8 585// bits of each element. 586def Low8BitsSplatPat 587 : ComplexPattern<vAny, 1, "selectLow8BitsVSplat", [], [], 2>; 588 589// Ignore the vl operand on vmv_v_f, and vmv_s_f. 590def SplatFPOp : PatFrags<(ops node:$op), 591 [(riscv_vfmv_v_f_vl undef, node:$op, srcvalue), 592 (riscv_vfmv_s_f_vl undef, node:$op, srcvalue)]>; 593 594def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>; 595def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>; 596def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>; 597def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>; 598 599class VPatBinaryVL_V<SDPatternOperator vop, 600 string instruction_name, 601 string suffix, 602 ValueType result_type, 603 ValueType op1_type, 604 ValueType op2_type, 605 ValueType mask_type, 606 int log2sew, 607 LMULInfo vlmul, 608 VReg result_reg_class, 609 VReg op1_reg_class, 610 VReg op2_reg_class, 611 bit isSEWAware = 0> 612 : Pat<(result_type (vop 613 (op1_type op1_reg_class:$rs1), 614 (op2_type op2_reg_class:$rs2), 615 (result_type result_reg_class:$merge), 616 (mask_type V0), 617 VLOpFrag)), 618 (!cast<Instruction>( 619 !if(isSEWAware, 620 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 621 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) 622 result_reg_class:$merge, 623 op1_reg_class:$rs1, 624 op2_reg_class:$rs2, 625 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 626 627class VPatBinaryVL_V_RM<SDPatternOperator vop, 628 string instruction_name, 629 string suffix, 630 ValueType result_type, 631 ValueType op1_type, 632 ValueType op2_type, 633 ValueType mask_type, 634 int log2sew, 635 LMULInfo vlmul, 636 VReg result_reg_class, 637 VReg op1_reg_class, 638 VReg op2_reg_class, 639 bit isSEWAware = 0> 640 : Pat<(result_type (vop 641 (op1_type op1_reg_class:$rs1), 642 (op2_type op2_reg_class:$rs2), 643 (result_type result_reg_class:$merge), 644 (mask_type V0), 645 VLOpFrag)), 646 (!cast<Instruction>( 647 !if(isSEWAware, 648 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 649 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) 650 result_reg_class:$merge, 651 op1_reg_class:$rs1, 652 op2_reg_class:$rs2, 653 (mask_type V0), 654 // Value to indicate no rounding mode change in 655 // RISCVInsertReadWriteCSR 656 FRM_DYN, 657 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 658 659multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop, 660 string instruction_name, 661 string suffix, 662 ValueType result_type, 663 ValueType op2_type, 664 int sew, 665 LMULInfo vlmul, 666 VReg result_reg_class, 667 VReg op2_reg_class> { 668 def : Pat<(result_type (vop 669 (result_type result_reg_class:$rs1), 670 (op2_type op2_reg_class:$rs2), 671 srcvalue, 672 true_mask, 673 VLOpFrag)), 674 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 675 result_reg_class:$rs1, 676 op2_reg_class:$rs2, 677 GPR:$vl, sew, TAIL_AGNOSTIC)>; 678 // Tail 
undisturbed 679 def : Pat<(riscv_vmerge_vl true_mask, 680 (result_type (vop 681 result_reg_class:$rs1, 682 (op2_type op2_reg_class:$rs2), 683 srcvalue, 684 true_mask, 685 VLOpFrag)), 686 result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag), 687 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 688 result_reg_class:$rs1, 689 op2_reg_class:$rs2, 690 GPR:$vl, sew, TU_MU)>; 691} 692 693multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop, 694 string instruction_name, 695 string suffix, 696 ValueType result_type, 697 ValueType op2_type, 698 int sew, 699 LMULInfo vlmul, 700 VReg result_reg_class, 701 VReg op2_reg_class> { 702 def : Pat<(result_type (vop 703 (result_type result_reg_class:$rs1), 704 (op2_type op2_reg_class:$rs2), 705 srcvalue, 706 true_mask, 707 VLOpFrag)), 708 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 709 result_reg_class:$rs1, 710 op2_reg_class:$rs2, 711 // Value to indicate no rounding mode change in 712 // RISCVInsertReadWriteCSR 713 FRM_DYN, 714 GPR:$vl, sew, TAIL_AGNOSTIC)>; 715 // Tail undisturbed 716 def : Pat<(riscv_vmerge_vl true_mask, 717 (result_type (vop 718 result_reg_class:$rs1, 719 (op2_type op2_reg_class:$rs2), 720 srcvalue, 721 true_mask, 722 VLOpFrag)), 723 result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag), 724 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 725 result_reg_class:$rs1, 726 op2_reg_class:$rs2, 727 // Value to indicate no rounding mode change in 728 // RISCVInsertReadWriteCSR 729 FRM_DYN, 730 GPR:$vl, sew, TU_MU)>; 731} 732 733class VPatBinaryVL_XI<SDPatternOperator vop, 734 string instruction_name, 735 string suffix, 736 ValueType result_type, 737 ValueType vop1_type, 738 ValueType vop2_type, 739 ValueType mask_type, 740 int log2sew, 741 LMULInfo vlmul, 742 VReg result_reg_class, 743 VReg vop_reg_class, 744 ComplexPattern SplatPatKind, 745 DAGOperand xop_kind, 746 bit isSEWAware = 0> 747 : Pat<(result_type (vop 748 (vop1_type vop_reg_class:$rs1), 749 (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), 750 (result_type result_reg_class:$merge), 751 (mask_type V0), 752 VLOpFrag)), 753 (!cast<Instruction>( 754 !if(isSEWAware, 755 instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 756 instruction_name#_#suffix#_#vlmul.MX#"_MASK")) 757 result_reg_class:$merge, 758 vop_reg_class:$rs1, 759 xop_kind:$rs2, 760 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 761 762multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name, 763 list<VTypeInfo> vtilist = AllIntegerVectors, 764 bit isSEWAware = 0> { 765 foreach vti = vtilist in { 766 let Predicates = GetVTypePredicates<vti>.Predicates in { 767 def : VPatBinaryVL_V<vop, instruction_name, "VV", 768 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 769 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 770 vti.RegClass, isSEWAware>; 771 def : VPatBinaryVL_XI<vop, instruction_name, "VX", 772 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 773 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 774 SplatPat, GPR, isSEWAware>; 775 } 776 } 777} 778 779multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name, 780 Operand ImmType = simm5> 781 : VPatBinaryVL_VV_VX<vop, instruction_name> { 782 foreach vti = AllIntegerVectors in { 783 let Predicates = GetVTypePredicates<vti>.Predicates in 784 def : VPatBinaryVL_XI<vop, instruction_name, "VI", 785 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 786 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 787 !cast<ComplexPattern>(SplatPat#_#ImmType), 
788 ImmType>; 789 } 790} 791 792multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> { 793 foreach VtiToWti = AllWidenableIntVectors in { 794 defvar vti = VtiToWti.Vti; 795 defvar wti = VtiToWti.Wti; 796 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 797 GetVTypePredicates<wti>.Predicates) in { 798 def : VPatBinaryVL_V<vop, instruction_name, "VV", 799 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 800 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 801 vti.RegClass>; 802 def : VPatBinaryVL_XI<vop, instruction_name, "VX", 803 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 804 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 805 SplatPat, GPR>; 806 } 807 } 808} 809 810multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w, 811 string instruction_name> 812 : VPatBinaryWVL_VV_VX<vop, instruction_name> { 813 foreach VtiToWti = AllWidenableIntVectors in { 814 defvar vti = VtiToWti.Vti; 815 defvar wti = VtiToWti.Wti; 816 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 817 GetVTypePredicates<wti>.Predicates) in { 818 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 819 wti.Vector, vti.Vector, vti.Log2SEW, 820 vti.LMul, wti.RegClass, vti.RegClass>; 821 def : VPatBinaryVL_V<vop_w, instruction_name, "WV", 822 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 823 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 824 vti.RegClass>; 825 def : VPatBinaryVL_XI<vop_w, instruction_name, "WX", 826 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 827 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 828 SplatPat, GPR>; 829 } 830 } 831} 832 833multiclass VPatBinaryNVL_WV_WX_WI<SDPatternOperator vop, string instruction_name> { 834 foreach VtiToWti = AllWidenableIntVectors in { 835 defvar vti = VtiToWti.Vti; 836 defvar wti = VtiToWti.Wti; 837 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 838 GetVTypePredicates<wti>.Predicates) in { 839 def : VPatBinaryVL_V<vop, instruction_name, "WV", 840 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 841 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 842 vti.RegClass>; 843 def : VPatBinaryVL_XI<vop, instruction_name, "WX", 844 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 845 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 846 SplatPat, GPR>; 847 def : VPatBinaryVL_XI<vop, instruction_name, "WI", 848 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 849 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 850 !cast<ComplexPattern>(SplatPat#_#uimm5), 851 uimm5>; 852 } 853 } 854} 855 856class VPatBinaryVL_VF<SDPatternOperator vop, 857 string instruction_name, 858 ValueType result_type, 859 ValueType vop1_type, 860 ValueType vop2_type, 861 ValueType mask_type, 862 int log2sew, 863 LMULInfo vlmul, 864 VReg result_reg_class, 865 VReg vop_reg_class, 866 RegisterClass scalar_reg_class, 867 bit isSEWAware = 0> 868 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 869 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 870 (result_type result_reg_class:$merge), 871 (mask_type V0), 872 VLOpFrag)), 873 (!cast<Instruction>( 874 !if(isSEWAware, 875 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 876 instruction_name#"_"#vlmul.MX#"_MASK")) 877 result_reg_class:$merge, 878 vop_reg_class:$rs1, 879 scalar_reg_class:$rs2, 880 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 881 882class VPatBinaryVL_VF_RM<SDPatternOperator vop, 883 string instruction_name, 884 ValueType result_type, 885 ValueType vop1_type, 886 ValueType vop2_type, 887 ValueType mask_type, 888 int 
log2sew, 889 LMULInfo vlmul, 890 VReg result_reg_class, 891 VReg vop_reg_class, 892 RegisterClass scalar_reg_class, 893 bit isSEWAware = 0> 894 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 895 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 896 (result_type result_reg_class:$merge), 897 (mask_type V0), 898 VLOpFrag)), 899 (!cast<Instruction>( 900 !if(isSEWAware, 901 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 902 instruction_name#"_"#vlmul.MX#"_MASK")) 903 result_reg_class:$merge, 904 vop_reg_class:$rs1, 905 scalar_reg_class:$rs2, 906 (mask_type V0), 907 // Value to indicate no rounding mode change in 908 // RISCVInsertReadWriteCSR 909 FRM_DYN, 910 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 911 912multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name, 913 bit isSEWAware = 0> { 914 foreach vti = AllFloatVectors in { 915 let Predicates = GetVTypePredicates<vti>.Predicates in { 916 def : VPatBinaryVL_V<vop, instruction_name, "VV", 917 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 918 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 919 vti.RegClass, isSEWAware>; 920 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 921 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 922 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 923 vti.ScalarRegClass, isSEWAware>; 924 } 925 } 926} 927 928multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name, 929 bit isSEWAware = 0> { 930 foreach vti = AllFloatVectors in { 931 let Predicates = GetVTypePredicates<vti>.Predicates in { 932 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 933 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 934 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 935 vti.RegClass, isSEWAware>; 936 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 937 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 938 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 939 vti.ScalarRegClass, isSEWAware>; 940 } 941 } 942} 943 944multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name, 945 bit isSEWAware = 0> { 946 foreach fvti = AllFloatVectors in { 947 let Predicates = GetVTypePredicates<fvti>.Predicates in 948 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 949 fvti.RegClass:$rs1, 950 (fvti.Vector fvti.RegClass:$merge), 951 (fvti.Mask V0), 952 VLOpFrag)), 953 (!cast<Instruction>( 954 !if(isSEWAware, 955 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 956 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 957 fvti.RegClass:$merge, 958 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 959 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 960 } 961} 962 963multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name, 964 bit isSEWAware = 0> { 965 foreach fvti = AllFloatVectors in { 966 let Predicates = GetVTypePredicates<fvti>.Predicates in 967 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 968 fvti.RegClass:$rs1, 969 (fvti.Vector fvti.RegClass:$merge), 970 (fvti.Mask V0), 971 VLOpFrag)), 972 (!cast<Instruction>( 973 !if(isSEWAware, 974 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 975 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 976 fvti.RegClass:$merge, 977 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 978 (fvti.Mask V0), 979 // Value to indicate no rounding mode change in 980 // RISCVInsertReadWriteCSR 981 FRM_DYN, 982 GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 983 } 984} 
985 986multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name, 987 CondCode cc> { 988 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 989 vti.RegClass:$rs2, cc, 990 VR:$merge, 991 (vti.Mask V0), 992 VLOpFrag)), 993 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 994 VR:$merge, 995 vti.RegClass:$rs1, 996 vti.RegClass:$rs2, 997 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 998} 999 1000// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 1001multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name, 1002 CondCode cc, CondCode invcc> 1003 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> { 1004 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), 1005 vti.RegClass:$rs1, invcc, 1006 VR:$merge, 1007 (vti.Mask V0), 1008 VLOpFrag)), 1009 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1010 VR:$merge, vti.RegClass:$rs1, 1011 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1012} 1013 1014multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name, 1015 CondCode cc, CondCode invcc> { 1016 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); 1017 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1018 (SplatPat (XLenVT GPR:$rs2)), cc, 1019 VR:$merge, 1020 (vti.Mask V0), 1021 VLOpFrag)), 1022 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1023 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1024 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), 1025 (vti.Vector vti.RegClass:$rs1), invcc, 1026 VR:$merge, 1027 (vti.Mask V0), 1028 VLOpFrag)), 1029 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1030 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1031} 1032 1033multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name, 1034 CondCode cc, CondCode invcc> { 1035 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1036 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1037 (SplatPat_simm5 simm5:$rs2), cc, 1038 VR:$merge, 1039 (vti.Mask V0), 1040 VLOpFrag)), 1041 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1042 XLenVT:$rs2, (vti.Mask V0), GPR:$vl, 1043 vti.Log2SEW)>; 1044 1045 // FIXME: Can do some canonicalization to remove these patterns. 1046 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2), 1047 (vti.Vector vti.RegClass:$rs1), invcc, 1048 VR:$merge, 1049 (vti.Mask V0), 1050 VLOpFrag)), 1051 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1052 simm5:$rs2, (vti.Mask V0), GPR:$vl, 1053 vti.Log2SEW)>; 1054} 1055 1056multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti, 1057 string instruction_name, 1058 CondCode cc, CondCode invcc, 1059 ComplexPattern splatpat_kind> { 1060 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1061 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1062 (splatpat_kind simm5:$rs2), cc, 1063 VR:$merge, 1064 (vti.Mask V0), 1065 VLOpFrag)), 1066 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1067 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1068 vti.Log2SEW)>; 1069 1070 // FIXME: Can do some canonicalization to remove these patterns. 
1071 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), 1072 (vti.Vector vti.RegClass:$rs1), invcc, 1073 VR:$merge, 1074 (vti.Mask V0), 1075 VLOpFrag)), 1076 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1077 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1078 vti.Log2SEW)>; 1079} 1080 1081multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc, 1082 string inst_name, 1083 string swapped_op_inst_name> { 1084 foreach fvti = AllFloatVectors in { 1085 let Predicates = GetVTypePredicates<fvti>.Predicates in { 1086 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1087 fvti.RegClass:$rs2, 1088 cc, 1089 VR:$merge, 1090 (fvti.Mask V0), 1091 VLOpFrag)), 1092 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") 1093 VR:$merge, fvti.RegClass:$rs1, 1094 fvti.RegClass:$rs2, (fvti.Mask V0), 1095 GPR:$vl, fvti.Log2SEW)>; 1096 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1097 (SplatFPOp fvti.ScalarRegClass:$rs2), 1098 cc, 1099 VR:$merge, 1100 (fvti.Mask V0), 1101 VLOpFrag)), 1102 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1103 VR:$merge, fvti.RegClass:$rs1, 1104 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1105 GPR:$vl, fvti.Log2SEW)>; 1106 def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 1107 (fvti.Vector fvti.RegClass:$rs1), 1108 cc, 1109 VR:$merge, 1110 (fvti.Mask V0), 1111 VLOpFrag)), 1112 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1113 VR:$merge, fvti.RegClass:$rs1, 1114 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1115 GPR:$vl, fvti.Log2SEW)>; 1116 } 1117 } 1118} 1119 1120multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix, 1121 list <VTypeInfoToFraction> fraction_list> { 1122 foreach vtiTofti = fraction_list in { 1123 defvar vti = vtiTofti.Vti; 1124 defvar fti = vtiTofti.Fti; 1125 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1126 GetVTypePredicates<fti>.Predicates) in 1127 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), 1128 (fti.Mask V0), VLOpFrag)), 1129 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") 1130 (vti.Vector (IMPLICIT_DEF)), 1131 fti.RegClass:$rs2, 1132 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1133 } 1134} 1135 1136// Single width converting 1137 1138multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1139 foreach fvti = AllFloatVectors in { 1140 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1141 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1142 GetVTypePredicates<ivti>.Predicates) in 1143 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1144 (fvti.Mask V0), 1145 VLOpFrag)), 1146 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1147 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1148 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>; 1149 } 1150} 1151 1152multiclass VPatConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1153 foreach fvti = AllFloatVectors in { 1154 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1155 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1156 GetVTypePredicates<ivti>.Predicates) in 1157 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1158 (fvti.Mask V0), 1159 VLOpFrag)), 1160 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1161 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1162 (fvti.Mask V0), 1163 // Value to indicate no rounding mode change in 1164 // RISCVInsertReadWriteCSR 1165 FRM_DYN, 1166 GPR:$vl, 
ivti.Log2SEW, TA_MA)>; 1167 } 1168} 1169 1170 1171multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_name> { 1172 foreach fvti = AllFloatVectors in { 1173 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1174 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1175 GetVTypePredicates<ivti>.Predicates) in 1176 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1177 (fvti.Mask V0), (XLenVT timm:$frm), 1178 VLOpFrag)), 1179 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1180 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1181 (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, 1182 TA_MA)>; 1183 } 1184} 1185 1186multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name> { 1187 foreach fvti = AllFloatVectors in { 1188 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1189 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1190 GetVTypePredicates<ivti>.Predicates) in 1191 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1192 (ivti.Mask V0), 1193 VLOpFrag)), 1194 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1195 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1196 (ivti.Mask V0), 1197 // Value to indicate no rounding mode change in 1198 // RISCVInsertReadWriteCSR 1199 FRM_DYN, 1200 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1201 } 1202} 1203 1204multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 1205 foreach fvti = AllFloatVectors in { 1206 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1207 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1208 GetVTypePredicates<ivti>.Predicates) in 1209 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1210 (ivti.Mask V0), (XLenVT timm:$frm), 1211 VLOpFrag)), 1212 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1213 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1214 (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1215 } 1216} 1217 1218// Widening converting 1219 1220multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1221 foreach fvtiToFWti = AllWidenableFloatVectors in { 1222 defvar fvti = fvtiToFWti.Vti; 1223 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1224 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1225 GetVTypePredicates<iwti>.Predicates) in 1226 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1227 (fvti.Mask V0), 1228 VLOpFrag)), 1229 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1230 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1231 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 1232 } 1233} 1234 1235multiclass VPatWConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1236 foreach fvtiToFWti = AllWidenableFloatVectors in { 1237 defvar fvti = fvtiToFWti.Vti; 1238 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1239 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1240 GetVTypePredicates<iwti>.Predicates) in 1241 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1242 (fvti.Mask V0), 1243 VLOpFrag)), 1244 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1245 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1246 (fvti.Mask V0), 1247 // Value to indicate no rounding mode change in 1248 // RISCVInsertReadWriteCSR 1249 FRM_DYN, 1250 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1251 } 1252} 1253 1254 1255multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> { 1256 foreach fvtiToFWti = AllWidenableFloatVectors in { 
1257 defvar fvti = fvtiToFWti.Vti; 1258 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1259 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1260 GetVTypePredicates<iwti>.Predicates) in 1261 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1262 (fvti.Mask V0), (XLenVT timm:$frm), 1263 VLOpFrag)), 1264 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1265 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1266 (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1267 } 1268} 1269 1270multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop, 1271 string instruction_name> { 1272 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1273 defvar ivti = vtiToWti.Vti; 1274 defvar fwti = vtiToWti.Wti; 1275 let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates, 1276 GetVTypePredicates<fwti>.Predicates) in 1277 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1278 (ivti.Mask V0), 1279 VLOpFrag)), 1280 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1281 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1282 (ivti.Mask V0), 1283 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1284 } 1285} 1286 1287// Narrowing converting 1288 1289multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop, 1290 string instruction_name> { 1291 // Reuse the same list of types used in the widening nodes, but just swap the 1292 // direction of types around so we're converting from Wti -> Vti 1293 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1294 defvar vti = vtiToWti.Vti; 1295 defvar fwti = vtiToWti.Wti; 1296 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1297 GetVTypePredicates<fwti>.Predicates) in 1298 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1299 (fwti.Mask V0), 1300 VLOpFrag)), 1301 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1302 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1303 (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1304 } 1305} 1306 1307multiclass VPatNConvertFP2IVL_W_RM<SDPatternOperator vop, 1308 string instruction_name> { 1309 // Reuse the same list of types used in the widening nodes, but just swap the 1310 // direction of types around so we're converting from Wti -> Vti 1311 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1312 defvar vti = vtiToWti.Vti; 1313 defvar fwti = vtiToWti.Wti; 1314 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1315 GetVTypePredicates<fwti>.Predicates) in 1316 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1317 (fwti.Mask V0), 1318 VLOpFrag)), 1319 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1320 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1321 (fwti.Mask V0), 1322 // Value to indicate no rounding mode change in 1323 // RISCVInsertReadWriteCSR 1324 FRM_DYN, 1325 GPR:$vl, vti.Log2SEW, TA_MA)>; 1326 } 1327} 1328 1329multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> { 1330 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1331 defvar vti = vtiToWti.Vti; 1332 defvar fwti = vtiToWti.Wti; 1333 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1334 GetVTypePredicates<fwti>.Predicates) in 1335 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1336 (fwti.Mask V0), (XLenVT timm:$frm), 1337 VLOpFrag)), 1338 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1339 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1340 (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>; 1341 } 1342} 1343 1344multiclass 
VPatNConvertI2FPVL_W_RM<SDPatternOperator vop, 1345 string instruction_name> { 1346 foreach fvtiToFWti = AllWidenableFloatVectors in { 1347 defvar fvti = fvtiToFWti.Vti; 1348 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1349 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1350 GetVTypePredicates<iwti>.Predicates) in 1351 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1352 (iwti.Mask V0), 1353 VLOpFrag)), 1354 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1355 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1356 (iwti.Mask V0), 1357 // Value to indicate no rounding mode change in 1358 // RISCVInsertReadWriteCSR 1359 FRM_DYN, 1360 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1361 } 1362} 1363 1364multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> { 1365 foreach fvtiToFWti = AllWidenableFloatVectors in { 1366 defvar fvti = fvtiToFWti.Vti; 1367 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1368 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1369 GetVTypePredicates<iwti>.Predicates) in 1370 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1371 (iwti.Mask V0), (XLenVT timm:$frm), 1372 VLOpFrag)), 1373 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1374 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1375 (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1376 } 1377} 1378 1379multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> { 1380 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1381 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1382 let Predicates = GetVTypePredicates<vti>.Predicates in { 1383 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1384 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1385 (vti.Mask V0), VLOpFrag, 1386 (XLenVT timm:$policy))), 1387 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1388 (vti_m1.Vector VR:$merge), 1389 (vti.Vector vti.RegClass:$rs1), 1390 (vti_m1.Vector VR:$rs2), 1391 (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1392 } 1393 } 1394} 1395 1396multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float> { 1397 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1398 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1399 let Predicates = GetVTypePredicates<vti>.Predicates in { 1400 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1401 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1402 (vti.Mask V0), VLOpFrag, 1403 (XLenVT timm:$policy))), 1404 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1405 (vti_m1.Vector VR:$merge), 1406 (vti.Vector vti.RegClass:$rs1), 1407 (vti_m1.Vector VR:$rs2), 1408 (vti.Mask V0), 1409 // Value to indicate no rounding mode change in 1410 // RISCVInsertReadWriteCSR 1411 FRM_DYN, 1412 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1413 } 1414 } 1415} 1416 1417multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> { 1418 foreach vtiToWti = AllWidenableIntVectors in { 1419 defvar vti = vtiToWti.Vti; 1420 defvar wti = vtiToWti.Wti; 1421 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1422 GetVTypePredicates<wti>.Predicates) in { 1423 def : Pat< 1424 (vti.Vector 1425 (riscv_trunc_vector_vl 1426 (op (wti.Vector wti.RegClass:$rs2), 1427 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))), 1428 (vti.Mask true_mask), 1429 VLOpFrag)), 1430 
(!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX) 1431 (vti.Vector (IMPLICIT_DEF)), 1432 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1433 1434 def : Pat< 1435 (vti.Vector 1436 (riscv_trunc_vector_vl 1437 (op (wti.Vector wti.RegClass:$rs2), 1438 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))), 1439 (vti.Mask true_mask), 1440 VLOpFrag)), 1441 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1442 (vti.Vector (IMPLICIT_DEF)), 1443 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1444 1445 def : Pat< 1446 (vti.Vector 1447 (riscv_trunc_vector_vl 1448 (op (wti.Vector wti.RegClass:$rs2), 1449 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), 1450 VLOpFrag)), 1451 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1452 (vti.Vector (IMPLICIT_DEF)), 1453 wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1454 } 1455 } 1456} 1457 1458multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1459 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1460 defvar vti = vtiToWti.Vti; 1461 defvar wti = vtiToWti.Wti; 1462 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1463 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1464 GetVTypePredicates<wti>.Predicates) in { 1465 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1466 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1467 VR:$rs2, (vti.Mask V0), VLOpFrag, 1468 (XLenVT timm:$policy))), 1469 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1470 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1471 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1472 (XLenVT timm:$policy))>; 1473 } 1474 } 1475} 1476 1477multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1478 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1479 defvar vti = vtiToWti.Vti; 1480 defvar wti = vtiToWti.Wti; 1481 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1482 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1483 GetVTypePredicates<wti>.Predicates) in { 1484 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1485 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1486 VR:$rs2, (vti.Mask V0), VLOpFrag, 1487 (XLenVT timm:$policy))), 1488 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1489 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1490 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1491 // Value to indicate no rounding mode change in 1492 // RISCVInsertReadWriteCSR 1493 FRM_DYN, 1494 GPR:$vl, vti.Log2SEW, 1495 (XLenVT timm:$policy))>; 1496 } 1497 } 1498} 1499 1500multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1501 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1502 defvar vti = vtiToWti.Vti; 1503 defvar wti = vtiToWti.Wti; 1504 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1505 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1506 GetVTypePredicates<wti>.Predicates) in { 1507 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1508 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1509 VR:$rs2, (vti.Mask V0), VLOpFrag, 1510 (XLenVT timm:$policy))), 1511 
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1512 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1513 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1514 (XLenVT timm:$policy))>; 1515 } 1516 } 1517} 1518 1519multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1520 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1521 defvar vti = vtiToWti.Vti; 1522 defvar wti = vtiToWti.Wti; 1523 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1524 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1525 GetVTypePredicates<wti>.Predicates) in { 1526 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1527 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1528 VR:$rs2, (vti.Mask V0), VLOpFrag, 1529 (XLenVT timm:$policy))), 1530 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1531 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1532 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1533 // Value to indicate no rounding mode change in 1534 // RISCVInsertReadWriteCSR 1535 FRM_DYN, 1536 GPR:$vl, vti.Log2SEW, 1537 (XLenVT timm:$policy))>; 1538 } 1539 } 1540} 1541 1542multiclass VPatBinaryFPWVL_VV_VF<SDNode vop, string instruction_name> { 1543 foreach fvtiToFWti = AllWidenableFloatVectors in { 1544 defvar vti = fvtiToFWti.Vti; 1545 defvar wti = fvtiToFWti.Wti; 1546 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1547 GetVTypePredicates<wti>.Predicates) in { 1548 def : VPatBinaryVL_V<vop, instruction_name, "VV", 1549 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1550 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1551 vti.RegClass>; 1552 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 1553 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1554 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1555 vti.ScalarRegClass>; 1556 } 1557 } 1558} 1559 1560multiclass VPatBinaryFPWVL_VV_VF_RM<SDNode vop, string instruction_name> { 1561 foreach fvtiToFWti = AllWidenableFloatVectors in { 1562 defvar vti = fvtiToFWti.Vti; 1563 defvar wti = fvtiToFWti.Wti; 1564 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1565 GetVTypePredicates<wti>.Predicates) in { 1566 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 1567 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1568 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1569 vti.RegClass>; 1570 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 1571 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1572 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1573 vti.ScalarRegClass>; 1574 } 1575 } 1576} 1577 1578multiclass VPatBinaryFPWVL_VV_VF_WV_WF<SDNode vop, SDNode vop_w, string instruction_name> 1579 : VPatBinaryFPWVL_VV_VF<vop, instruction_name> { 1580 foreach fvtiToFWti = AllWidenableFloatVectors in { 1581 defvar vti = fvtiToFWti.Vti; 1582 defvar wti = fvtiToFWti.Wti; 1583 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1584 GetVTypePredicates<wti>.Predicates) in { 1585 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 1586 wti.Vector, vti.Vector, vti.Log2SEW, 1587 vti.LMul, wti.RegClass, vti.RegClass>; 1588 def : VPatBinaryVL_V<vop_w, instruction_name, "WV", 1589 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1590 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1591 vti.RegClass>; 1592 def : VPatBinaryVL_VF<vop_w, 
instruction_name#"_W"#vti.ScalarSuffix, 1593 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1594 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1595 vti.ScalarRegClass>; 1596 } 1597 } 1598} 1599 1600multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM<SDNode vop, SDNode vop_w, string instruction_name> 1601 : VPatBinaryFPWVL_VV_VF_RM<vop, instruction_name> { 1602 foreach fvtiToFWti = AllWidenableFloatVectors in { 1603 defvar vti = fvtiToFWti.Vti; 1604 defvar wti = fvtiToFWti.Wti; 1605 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1606 GetVTypePredicates<wti>.Predicates) in { 1607 defm : VPatTiedBinaryNoMaskVL_V_RM<vop_w, instruction_name, "WV", 1608 wti.Vector, vti.Vector, vti.Log2SEW, 1609 vti.LMul, wti.RegClass, vti.RegClass>; 1610 def : VPatBinaryVL_V_RM<vop_w, instruction_name, "WV", 1611 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1612 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1613 vti.RegClass>; 1614 def : VPatBinaryVL_VF_RM<vop_w, instruction_name#"_W"#vti.ScalarSuffix, 1615 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1616 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1617 vti.ScalarRegClass>; 1618 } 1619 } 1620} 1621 1622multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> { 1623 foreach vtiToWti = AllWidenableIntVectors in { 1624 defvar vti = vtiToWti.Vti; 1625 defvar wti = vtiToWti.Wti; 1626 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1627 GetVTypePredicates<wti>.Predicates) in 1628 def : Pat< 1629 (vti.Vector 1630 (riscv_trunc_vector_vl 1631 (op (wti.Vector wti.RegClass:$rs2), 1632 (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))), 1633 (vti.Mask true_mask), VLOpFrag)), 1634 srcvalue, (wti.Mask true_mask), VLOpFrag), 1635 (vti.Mask true_mask), VLOpFrag)), 1636 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1637 (vti.Vector (IMPLICIT_DEF)), 1638 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1639 } 1640} 1641 1642multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction_name> { 1643 foreach vtiToWti = AllWidenableIntVectors in { 1644 defvar vti = vtiToWti.Vti; 1645 defvar wti = vtiToWti.Wti; 1646 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1647 GetVTypePredicates<wti>.Predicates) in 1648 def : Pat< 1649 (vti.Vector 1650 (riscv_trunc_vector_vl 1651 (op (wti.Vector wti.RegClass:$rs2), 1652 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), 1653 (vti.Mask true_mask), VLOpFrag)), 1654 srcvalue, (vti.Mask true_mask), VLOpFrag), 1655 (vti.Mask V0), VLOpFrag)), 1656 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK") 1657 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1, 1658 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1659 } 1660} 1661 1662multiclass VPatNarrowShiftVL_WV<SDNode op, string instruction_name> { 1663 defm : VPatNarrowShiftExtVL_WV<op, riscv_sext_vl_oneuse, instruction_name>; 1664 defm : VPatNarrowShiftExtVL_WV<op, riscv_zext_vl_oneuse, instruction_name>; 1665} 1666 1667multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> { 1668 foreach vti = AllIntegerVectors in { 1669 defvar suffix = vti.LMul.MX; 1670 let Predicates = GetVTypePredicates<vti>.Predicates in { 1671 // NOTE: We choose VMADD because it has the most commuting freedom. So it 1672 // works best with how TwoAddressInstructionPass tries commuting. 
1673 def : Pat<(vti.Vector 1674 (op vti.RegClass:$rs2, 1675 (riscv_mul_vl_oneuse vti.RegClass:$rs1, 1676 vti.RegClass:$rd, 1677 srcvalue, (vti.Mask true_mask), VLOpFrag), 1678 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1679 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 1680 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1681 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1682 // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally 1683 // commutable. 1684 def : Pat<(vti.Vector 1685 (op vti.RegClass:$rs2, 1686 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), 1687 vti.RegClass:$rd, 1688 srcvalue, (vti.Mask true_mask), VLOpFrag), 1689 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1690 (!cast<Instruction>(instruction_name#"_VX_" # suffix) 1691 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1692 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1693 } 1694 } 1695} 1696 1697multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> { 1698 foreach vti = AllIntegerVectors in { 1699 defvar suffix = vti.LMul.MX; 1700 let Predicates = GetVTypePredicates<vti>.Predicates in { 1701 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1702 (vti.Vector (op vti.RegClass:$rd, 1703 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1704 srcvalue, (vti.Mask true_mask), VLOpFrag), 1705 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1706 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1707 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1708 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1709 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1710 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1711 (vti.Vector (op vti.RegClass:$rd, 1712 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1713 srcvalue, (vti.Mask true_mask), VLOpFrag), 1714 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1715 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1716 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1717 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1718 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1719 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1720 (vti.Vector (op vti.RegClass:$rd, 1721 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1722 srcvalue, (vti.Mask true_mask), VLOpFrag), 1723 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1724 vti.RegClass:$rd, undef, VLOpFrag), 1725 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1726 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1727 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1728 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1729 (vti.Vector (op vti.RegClass:$rd, 1730 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1731 srcvalue, (vti.Mask true_mask), VLOpFrag), 1732 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1733 vti.RegClass:$rd, undef, VLOpFrag), 1734 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1735 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1736 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1737 } 1738 } 1739} 1740 1741multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> { 1742 foreach vtiTowti = AllWidenableIntVectors in { 1743 defvar vti = vtiTowti.Vti; 1744 defvar wti = vtiTowti.Wti; 1745 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1746 GetVTypePredicates<wti>.Predicates) in { 1747 def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1), 1748 (vti.Vector vti.RegClass:$rs2), 1749 (wti.Vector wti.RegClass:$rd), 1750 (vti.Mask V0), VLOpFrag), 1751 
(!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK") 1752 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1753 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1754 def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1), 1755 (vti.Vector vti.RegClass:$rs2), 1756 (wti.Vector wti.RegClass:$rd), 1757 (vti.Mask V0), VLOpFrag), 1758 (!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK") 1759 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, 1760 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1761 TAIL_AGNOSTIC)>; 1762 } 1763 } 1764} 1765 1766multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> { 1767 foreach vtiTowti = AllWidenableIntVectors in { 1768 defvar vti = vtiTowti.Vti; 1769 defvar wti = vtiTowti.Wti; 1770 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1771 GetVTypePredicates<wti>.Predicates) in { 1772 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1773 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2), 1774 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1775 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1776 (vti.Vector (IMPLICIT_DEF)), 1777 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; 1778 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1779 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2), 1780 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1781 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1782 (vti.Vector (IMPLICIT_DEF)), 1783 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; 1784 } 1785 } 1786} 1787 1788multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name> { 1789 foreach vti = AllFloatVectors in { 1790 defvar suffix = vti.LMul.MX; 1791 let Predicates = GetVTypePredicates<vti>.Predicates in { 1792 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1793 vti.RegClass:$rs2, (vti.Mask V0), 1794 VLOpFrag)), 1795 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1796 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1797 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1798 1799 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1800 vti.RegClass:$rd, vti.RegClass:$rs2, 1801 (vti.Mask V0), 1802 VLOpFrag)), 1803 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1804 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1805 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1806 } 1807 } 1808} 1809 1810multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_name> { 1811 foreach vti = AllFloatVectors in { 1812 defvar suffix = vti.LMul.MX; 1813 let Predicates = GetVTypePredicates<vti>.Predicates in { 1814 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1815 vti.RegClass:$rs2, (vti.Mask V0), 1816 VLOpFrag)), 1817 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1818 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1819 (vti.Mask V0), 1820 // Value to indicate no rounding mode change in 1821 // RISCVInsertReadWriteCSR 1822 FRM_DYN, 1823 GPR:$vl, vti.Log2SEW, TA_MA)>; 1824 1825 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1826 vti.RegClass:$rd, vti.RegClass:$rs2, 1827 (vti.Mask V0), 1828 VLOpFrag)), 1829 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1830 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1831 (vti.Mask V0), 1832 // Value to indicate no rounding mode change in 1833 // RISCVInsertReadWriteCSR 1834 FRM_DYN, 1835 
GPR:$vl, vti.Log2SEW, TA_MA)>; 1836 } 1837 } 1838} 1839 1840multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> { 1841 foreach vti = AllFloatVectors in { 1842 defvar suffix = vti.LMul.MX; 1843 let Predicates = GetVTypePredicates<vti>.Predicates in { 1844 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1845 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1846 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1847 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1848 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1849 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1850 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1851 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1852 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1853 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1854 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1855 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1856 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1857 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1858 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1859 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1860 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1861 vti.RegClass:$rd, undef, VLOpFrag), 1862 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1863 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1864 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1865 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1866 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1867 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1868 vti.RegClass:$rd, undef, VLOpFrag), 1869 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1870 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1871 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1872 } 1873 } 1874} 1875 1876multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> { 1877 foreach vti = AllFloatVectors in { 1878 defvar suffix = vti.LMul.MX; 1879 let Predicates = GetVTypePredicates<vti>.Predicates in { 1880 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1881 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1882 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1883 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1884 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1885 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1886 (vti.Mask V0), 1887 // Value to indicate no rounding mode change in 1888 // RISCVInsertReadWriteCSR 1889 FRM_DYN, 1890 GPR:$vl, vti.Log2SEW, TU_MU)>; 1891 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1892 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1893 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1894 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1895 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1896 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1897 (vti.Mask V0), 1898 // Value to indicate no rounding mode change in 1899 // RISCVInsertReadWriteCSR 1900 FRM_DYN, 1901 GPR:$vl, vti.Log2SEW, TU_MU)>; 1902 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1903 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1904 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1905 vti.RegClass:$rd, undef, VLOpFrag), 1906 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1907 vti.RegClass:$rd, vti.RegClass:$rs1, 
vti.RegClass:$rs2, 1908 (vti.Mask V0), 1909 // Value to indicate no rounding mode change in 1910 // RISCVInsertReadWriteCSR 1911 FRM_DYN, 1912 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1913 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1914 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1915 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1916 vti.RegClass:$rd, undef, VLOpFrag), 1917 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1918 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1919 (vti.Mask V0), 1920 // Value to indicate no rounding mode change in 1921 // RISCVInsertReadWriteCSR 1922 FRM_DYN, 1923 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1924 } 1925 } 1926} 1927 1928multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> { 1929 foreach vtiToWti = AllWidenableFloatVectors in { 1930 defvar vti = vtiToWti.Vti; 1931 defvar wti = vtiToWti.Wti; 1932 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1933 GetVTypePredicates<wti>.Predicates) in { 1934 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 1935 (vti.Vector vti.RegClass:$rs2), 1936 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 1937 VLOpFrag), 1938 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK") 1939 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1940 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1941 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 1942 (vti.Vector vti.RegClass:$rs2), 1943 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 1944 VLOpFrag), 1945 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK") 1946 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1947 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1948 } 1949 } 1950} 1951 1952multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> { 1953 foreach vtiToWti = AllWidenableFloatVectors in { 1954 defvar vti = vtiToWti.Vti; 1955 defvar wti = vtiToWti.Wti; 1956 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1957 GetVTypePredicates<wti>.Predicates) in { 1958 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 1959 (vti.Vector vti.RegClass:$rs2), 1960 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 1961 VLOpFrag), 1962 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK") 1963 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1964 (vti.Mask V0), 1965 // Value to indicate no rounding mode change in 1966 // RISCVInsertReadWriteCSR 1967 FRM_DYN, 1968 GPR:$vl, vti.Log2SEW, TA_MA)>; 1969 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 1970 (vti.Vector vti.RegClass:$rs2), 1971 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 1972 VLOpFrag), 1973 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK") 1974 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1975 (vti.Mask V0), 1976 // Value to indicate no rounding mode change in 1977 // RISCVInsertReadWriteCSR 1978 FRM_DYN, 1979 GPR:$vl, vti.Log2SEW, TA_MA)>; 1980 } 1981 } 1982} 1983 1984multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> { 1985 foreach vti = AllVectors in { 1986 let Predicates = GetVTypePredicates<vti>.Predicates in { 1987 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd), 1988 (vti.Vector vti.RegClass:$rs1), 1989 uimm5:$rs2, (vti.Mask V0), 1990 VLOpFrag, (XLenVT timm:$policy))), 1991 (!cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK") 1992 vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2, 1993 
(vti.Mask V0), GPR:$vl, vti.Log2SEW, 1994 (XLenVT timm:$policy))>; 1995 1996 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd), 1997 (vti.Vector vti.RegClass:$rs1), 1998 GPR:$rs2, (vti.Mask V0), 1999 VLOpFrag, (XLenVT timm:$policy))), 2000 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK") 2001 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, 2002 (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2003 (XLenVT timm:$policy))>; 2004 } 2005 } 2006} 2007 2008multiclass VPatSlide1VL_VX<SDNode vop, string instruction_name> { 2009 foreach vti = AllIntegerVectors in { 2010 let Predicates = GetVTypePredicates<vti>.Predicates in { 2011 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3), 2012 (vti.Vector vti.RegClass:$rs1), 2013 GPR:$rs2, (vti.Mask V0), VLOpFrag)), 2014 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK") 2015 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 2016 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 2017 } 2018 } 2019} 2020 2021multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> { 2022 foreach vti = AllFloatVectors in { 2023 let Predicates = GetVTypePredicates<vti>.Predicates in { 2024 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3), 2025 (vti.Vector vti.RegClass:$rs1), 2026 vti.Scalar:$rs2, (vti.Mask V0), VLOpFrag)), 2027 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK") 2028 vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2, 2029 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 2030 } 2031 } 2032} 2033 2034//===----------------------------------------------------------------------===// 2035// Patterns. 2036//===----------------------------------------------------------------------===// 2037 2038// 11. Vector Integer Arithmetic Instructions 2039 2040// 11.1. Vector Single-Width Integer Add and Subtract 2041defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">; 2042defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">; 2043// Handle VRSUB specially since it's the only integer binary op with reversed 2044// pattern operands 2045foreach vti = AllIntegerVectors in { 2046 let Predicates = GetVTypePredicates<vti>.Predicates in { 2047 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 2048 (vti.Vector vti.RegClass:$rs1), 2049 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2050 (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK") 2051 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2, 2052 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2053 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)), 2054 (vti.Vector vti.RegClass:$rs1), 2055 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2056 (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK") 2057 vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2, 2058 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2059 } 2060} 2061 2062// 11.2. Vector Widening Integer Add/Subtract 2063defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">; 2064defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">; 2065defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">; 2066defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">; 2067 2068// shl_vl (ext_vl v, splat 1) is a special case of widening add. 
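// Since x << 1 == x + x, (sext_vl x) << 1 can be selected as vwadd.vv x, x
// and (zext_vl x) << 1 as vwaddu.vv x, x; the patterns below do exactly that,
// passing $rs1 for both source operands of the widening add.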
2069foreach vtiToWti = AllWidenableIntVectors in { 2070 defvar vti = vtiToWti.Vti; 2071 defvar wti = vtiToWti.Wti; 2072 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2073 GetVTypePredicates<wti>.Predicates) in { 2074 def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse 2075 (vti.Vector vti.RegClass:$rs1), 2076 (vti.Mask V0), VLOpFrag)), 2077 (wti.Vector (riscv_vmv_v_x_vl 2078 (wti.Vector undef), 1, VLOpFrag)), 2079 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2080 (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK") 2081 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1, 2082 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2083 def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse 2084 (vti.Vector vti.RegClass:$rs1), 2085 (vti.Mask V0), VLOpFrag)), 2086 (wti.Vector (riscv_vmv_v_x_vl 2087 (wti.Vector undef), 1, VLOpFrag)), 2088 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2089 (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK") 2090 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1, 2091 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2092 } 2093} 2094 2095// 11.3. Vector Integer Extension 2096defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2", 2097 AllFractionableVF2IntVectors>; 2098defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2", 2099 AllFractionableVF2IntVectors>; 2100defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4", 2101 AllFractionableVF4IntVectors>; 2102defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4", 2103 AllFractionableVF4IntVectors>; 2104defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8", 2105 AllFractionableVF8IntVectors>; 2106defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8", 2107 AllFractionableVF8IntVectors>; 2108 2109// 11.5. Vector Bitwise Logical Instructions 2110defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">; 2111defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">; 2112defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">; 2113 2114// 11.6. Vector Single-Width Bit Shift Instructions 2115defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>; 2116defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>; 2117defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>; 2118 2119foreach vti = AllIntegerVectors in { 2120 // Emit shift by 1 as an add since it might be faster. 2121 let Predicates = GetVTypePredicates<vti>.Predicates in 2122 def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1), 2123 (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), 2124 srcvalue, (vti.Mask true_mask), VLOpFrag), 2125 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 2126 (vti.Vector (IMPLICIT_DEF)), 2127 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 2128} 2129 2130// 11.7. 
Vector Narrowing Integer Right Shift Instructions 2131defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">; 2132defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">; 2133 2134defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">; 2135defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">; 2136defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">; 2137defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">; 2138defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">; 2139defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">; 2140 2141defm : VPatNarrowShiftVL_WV<riscv_srl_vl, "PseudoVNSRL">; 2142defm : VPatNarrowShiftVL_WV<riscv_sra_vl, "PseudoVNSRA">; 2143 2144defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">; 2145 2146foreach vtiTowti = AllWidenableIntVectors in { 2147 defvar vti = vtiTowti.Vti; 2148 defvar wti = vtiTowti.Wti; 2149 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2150 GetVTypePredicates<wti>.Predicates) in 2151 def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1), 2152 (vti.Mask V0), 2153 VLOpFrag)), 2154 (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK") 2155 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, 2156 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2157} 2158 2159// 11.8. Vector Integer Comparison Instructions 2160foreach vti = AllIntegerVectors in { 2161 let Predicates = GetVTypePredicates<vti>.Predicates in { 2162 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>; 2163 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>; 2164 2165 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 2166 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 2167 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2168 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2169 2170 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 2171 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 2172 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 2173 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 2174 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2175 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2176 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 2177 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 2178 // There is no VMSGE(U)_VX instruction 2179 2180 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 2181 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 2182 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2183 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2184 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 2185 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 2186 2187 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT, 2188 SplatPat_simm5_plus1>; 2189 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT, 2190 SplatPat_simm5_plus1_nonzero>; 2191 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE, 2192 SplatPat_simm5_plus1>; 2193 defm : 
VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE,
                                            SplatPat_simm5_plus1_nonzero>;
  }
} // foreach vti = AllIntegerVectors

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", IntegerVectorsExceptI64>;
// vmulh.{vv,vx} and vmulhu.{vv,vx} are not included for EEW=64 in Zve64*.
let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", I64IntegerVectors>;
}

// 11.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>;

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">;

// 11.13. Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">;
defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">;
defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">;
defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmacc_vl, "PseudoVWMACC">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccu_vl, "PseudoVWMACCU">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccsu_vl, "PseudoVWMACCSU">;
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in
  def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1),
                               (SplatPat XLenVT:$rs2),
                               (wti.Vector wti.RegClass:$rd),
                               (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK")
                wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 11.15.
Vector Integer Merge Instructions 2250foreach vti = AllIntegerVectors in { 2251 let Predicates = GetVTypePredicates<vti>.Predicates in { 2252 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0), 2253 vti.RegClass:$rs1, 2254 vti.RegClass:$rs2, 2255 vti.RegClass:$merge, 2256 VLOpFrag)), 2257 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX) 2258 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 2259 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2260 2261 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0), 2262 (SplatPat XLenVT:$rs1), 2263 vti.RegClass:$rs2, 2264 vti.RegClass:$merge, 2265 VLOpFrag)), 2266 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX) 2267 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 2268 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2269 2270 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0), 2271 (SplatPat_simm5 simm5:$rs1), 2272 vti.RegClass:$rs2, 2273 vti.RegClass:$merge, 2274 VLOpFrag)), 2275 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX) 2276 vti.RegClass:$merge, vti.RegClass:$rs2, simm5:$rs1, 2277 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2278 } 2279} 2280 2281// 11.16. Vector Integer Move Instructions 2282foreach vti = AllVectors in { 2283 let Predicates = GetVTypePredicates<vti>.Predicates in { 2284 def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru, 2285 vti.RegClass:$rs2, VLOpFrag)), 2286 (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX) 2287 vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2288} 2289 2290foreach vti = AllIntegerVectors in { 2291 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)), 2292 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX) 2293 vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2294 defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5"); 2295 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5), 2296 VLOpFrag)), 2297 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX) 2298 vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>; 2299 } 2300} 2301 2302// 12. Vector Fixed-Point Arithmetic Instructions 2303 2304// 12.1. Vector Single-Width Saturating Add and Subtract 2305defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">; 2306defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">; 2307defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">; 2308defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">; 2309 2310// 12.2. Vector Single-Width Averaging Add and Subtract 2311foreach vti = AllIntegerVectors in { 2312 let Predicates = GetVTypePredicates<vti>.Predicates in { 2313 def : Pat<(riscv_avgflooru_vl (vti.Vector vti.RegClass:$rs1), 2314 (vti.Vector vti.RegClass:$rs2), 2315 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2316 (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX#"_MASK") 2317 vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2, 2318 (vti.Mask V0), 0b10, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2319 def : Pat<(riscv_avgflooru_vl (vti.Vector vti.RegClass:$rs1), 2320 (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 2321 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2322 (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX#"_MASK") 2323 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2, 2324 (vti.Mask V0), 0b10, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2325 } 2326} 2327 2328// 12.5. 
Vector Narrowing Fixed-Point Clip Instructions 2329class VPatTruncSatClipMaxMinBase<string inst, 2330 VTypeInfo vti, 2331 VTypeInfo wti, 2332 SDPatternOperator op1, 2333 int op1_value, 2334 SDPatternOperator op2, 2335 int op2_value> : 2336 Pat<(vti.Vector (riscv_trunc_vector_vl 2337 (wti.Vector (op1 2338 (wti.Vector (op2 2339 (wti.Vector wti.RegClass:$rs1), 2340 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), op2_value, (XLenVT srcvalue))), 2341 (wti.Vector undef),(wti.Mask V0), VLOpFrag)), 2342 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), op1_value, (XLenVT srcvalue))), 2343 (wti.Vector undef), (wti.Mask V0), VLOpFrag)), 2344 (vti.Mask V0), VLOpFrag)), 2345 (!cast<Instruction>(inst#"_WI_"#vti.LMul.MX#"_MASK") 2346 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, 2347 (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>; 2348 2349class VPatTruncSatClipUMin<VTypeInfo vti, 2350 VTypeInfo wti, 2351 int uminval> : 2352 Pat<(vti.Vector (riscv_trunc_vector_vl 2353 (wti.Vector (riscv_umin_vl 2354 (wti.Vector wti.RegClass:$rs1), 2355 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), uminval, (XLenVT srcvalue))), 2356 (wti.Vector undef), (wti.Mask V0), VLOpFrag)), 2357 (vti.Mask V0), VLOpFrag)), 2358 (!cast<Instruction>("PseudoVNCLIPU_WI_"#vti.LMul.MX#"_MASK") 2359 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, 2360 (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>; 2361 2362multiclass VPatTruncSatClipMaxMin<string inst, VTypeInfo vti, VTypeInfo wti, 2363 SDPatternOperator max, int maxval, SDPatternOperator min, int minval> { 2364 def : VPatTruncSatClipMaxMinBase<inst, vti, wti, max, maxval, min, minval>; 2365 def : VPatTruncSatClipMaxMinBase<inst, vti, wti, min, minval, max, maxval>; 2366} 2367 2368multiclass VPatTruncSatClip<VTypeInfo vti, VTypeInfo wti> { 2369 defvar sew = vti.SEW; 2370 defvar uminval = !sub(!shl(1, sew), 1); 2371 defvar sminval = !sub(!shl(1, !sub(sew, 1)), 1); 2372 defvar smaxval = !sub(0, !shl(1, !sub(sew, 1))); 2373 2374 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2375 GetVTypePredicates<wti>.Predicates) in { 2376 defm : VPatTruncSatClipMaxMin<"PseudoVNCLIP", vti, wti, riscv_smin_vl, 2377 sminval, riscv_smax_vl, smaxval>; 2378 def : VPatTruncSatClipUMin<vti, wti, uminval>; 2379 } 2380 2381} 2382 2383foreach vtiToWti = AllWidenableIntVectors in 2384 defm : VPatTruncSatClip<vtiToWti.Vti, vtiToWti.Wti>; 2385 2386// 13. Vector Floating-Point Instructions 2387 2388// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 2389defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD">; 2390defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fsub_vl, "PseudoVFSUB">; 2391defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fsub_vl, "PseudoVFRSUB">; 2392 2393// 13.3. Vector Widening Floating-Point Add/Subtract Instructions 2394defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwadd_vl, riscv_vfwadd_w_vl, "PseudoVFWADD">; 2395defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwsub_vl, riscv_vfwsub_w_vl, "PseudoVFWSUB">; 2396 2397// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 2398defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fmul_vl, "PseudoVFMUL">; 2399defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fdiv_vl, "PseudoVFDIV", isSEWAware=1>; 2400defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fdiv_vl, "PseudoVFRDIV", isSEWAware=1>; 2401 2402// 13.5. Vector Widening Floating-Point Multiply Instructions 2403defm : VPatBinaryFPWVL_VV_VF_RM<riscv_vfwmul_vl, "PseudoVFWMUL">; 2404 2405// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions. 
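// The plain FMA nodes map to the multiplier-overwriting pseudos
// (vfmadd/vfmsub/vfnmadd/vfnmsub); the *_oneuse variants, which only match
// when the FMA feeds a vmerge that writes back into the addend register, map
// to the accumulator-overwriting forms (vfmacc/vfmsac/vfnmacc/vfnmsac).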
2406defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmadd_vl, "PseudoVFMADD">; 2407defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmsub_vl, "PseudoVFMSUB">; 2408defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmadd_vl, "PseudoVFNMADD">; 2409defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmsub_vl, "PseudoVFNMSUB">; 2410defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">; 2411defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">; 2412defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">; 2413defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">; 2414 2415// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 2416defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACC">; 2417defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmadd_vl, "PseudoVFWNMACC">; 2418defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmsub_vl, "PseudoVFWMSAC">; 2419defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmsub_vl, "PseudoVFWNMSAC">; 2420 2421// 13.11. Vector Floating-Point MIN/MAX Instructions 2422defm : VPatBinaryFPVL_VV_VF<riscv_vfmin_vl, "PseudoVFMIN">; 2423defm : VPatBinaryFPVL_VV_VF<riscv_vfmax_vl, "PseudoVFMAX">; 2424 2425// 13.13. Vector Floating-Point Compare Instructions 2426defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETEQ, 2427 "PseudoVMFEQ", "PseudoVMFEQ">; 2428defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETOEQ, 2429 "PseudoVMFEQ", "PseudoVMFEQ">; 2430defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETNE, 2431 "PseudoVMFNE", "PseudoVMFNE">; 2432defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETUNE, 2433 "PseudoVMFNE", "PseudoVMFNE">; 2434defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLT, 2435 "PseudoVMFLT", "PseudoVMFGT">; 2436defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLT, 2437 "PseudoVMFLT", "PseudoVMFGT">; 2438defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLE, 2439 "PseudoVMFLE", "PseudoVMFGE">; 2440defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE, 2441 "PseudoVMFLE", "PseudoVMFGE">; 2442 2443foreach vti = AllFloatVectors in { 2444 let Predicates = GetVTypePredicates<vti>.Predicates in { 2445 // 13.8. Vector Floating-Point Square-Root Instruction 2446 def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0), 2447 VLOpFrag), 2448 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK") 2449 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 2450 (vti.Mask V0), 2451 // Value to indicate no rounding mode change in 2452 // RISCVInsertReadWriteCSR 2453 FRM_DYN, 2454 GPR:$vl, vti.Log2SEW, TA_MA)>; 2455 2456 // 13.12. Vector Floating-Point Sign-Injection Instructions 2457 def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), 2458 VLOpFrag), 2459 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_MASK") 2460 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, 2461 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2462 TA_MA)>; 2463 // Handle fneg with VFSGNJN using the same input for both operands. 
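    // vfsgnjn.vv vd, vs, vs keeps the magnitude of vs but inverts its sign
    // bit, giving fneg; likewise the fabs pattern above relies on
    // vfsgnjx.vv vd, vs, vs xor-ing the sign bit with itself to clear it.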
    def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                             VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TA_MA)>;

    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (vti.Vector vti.RegClass:$rs2),
                                  vti.RegClass:$merge,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1,
                   vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (riscv_fneg_vl vti.RegClass:$rs2,
                                                 (vti.Mask true_mask),
                                                 VLOpFrag),
                                  srcvalue,
                                  (vti.Mask true_mask),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;

    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (SplatFPOp vti.ScalarRegClass:$rs2),
                                  vti.RegClass:$merge,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1,
                   vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    // Rounding without exception to implement nearbyint.
    def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
                                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

    // 13.14. Vector Floating-Point Classify Instruction
    def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
                               (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

foreach fvti = AllFloatVectors in {
  // Floating-point vselects:
  // 11.15. Vector Integer Merge Instructions
  // 13.15.
Vector Floating-Point Merge Instruction 2522 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 2523 let Predicates = GetVTypePredicates<ivti>.Predicates in { 2524 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2525 fvti.RegClass:$rs1, 2526 fvti.RegClass:$rs2, 2527 fvti.RegClass:$merge, 2528 VLOpFrag)), 2529 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 2530 fvti.RegClass:$merge, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0), 2531 GPR:$vl, fvti.Log2SEW)>; 2532 2533 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2534 (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))), 2535 fvti.RegClass:$rs2, 2536 fvti.RegClass:$merge, 2537 VLOpFrag)), 2538 (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX) 2539 fvti.RegClass:$merge, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0), 2540 GPR:$vl, fvti.Log2SEW)>; 2541 2542 2543 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2544 (SplatFPOp (fvti.Scalar fpimm0)), 2545 fvti.RegClass:$rs2, 2546 fvti.RegClass:$merge, 2547 VLOpFrag)), 2548 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 2549 fvti.RegClass:$merge, fvti.RegClass:$rs2, 0, (fvti.Mask V0), 2550 GPR:$vl, fvti.Log2SEW)>; 2551 } 2552 2553 let Predicates = GetVTypePredicates<fvti>.Predicates in { 2554 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2555 (SplatFPOp fvti.ScalarRegClass:$rs1), 2556 fvti.RegClass:$rs2, 2557 fvti.RegClass:$merge, 2558 VLOpFrag)), 2559 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 2560 fvti.RegClass:$merge, fvti.RegClass:$rs2, 2561 (fvti.Scalar fvti.ScalarRegClass:$rs1), 2562 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 2563 2564 // 13.16. Vector Floating-Point Move Instruction 2565 // If we're splatting fpimm0, use vmv.v.x vd, x0. 2566 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2567 fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)), 2568 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX) 2569 $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>; 2570 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2571 fvti.Vector:$passthru, (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)), 2572 (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX) 2573 $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>; 2574 2575 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2576 fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), 2577 (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" # 2578 fvti.LMul.MX) 2579 $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), 2580 GPR:$vl, fvti.Log2SEW, TU_MU)>; 2581 } 2582} 2583 2584// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions 2585defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">; 2586defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">; 2587defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">; 2588defm : VPatConvertFP2I_RM_VL_V<any_riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">; 2589 2590defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">; 2591defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">; 2592 2593defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">; 2594defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">; 2595 2596defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">; 2597defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">; 2598 2599// 13.18. 
// Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">;

defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;

defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fwti.Vector (any_riscv_fpextend_vl
                             (fvti.Vector fvti.RegClass:$rs1),
                             (fvti.Mask V0),
                             VLOpFrag)),
            (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
                (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                (fvti.Mask V0),
                GPR:$vl, fvti.Log2SEW, TA_MA)>;
}

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">;

defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;

defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;

defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  // Define vfncvt.f.f.w for f16 when Zvfhmin is enabled.
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in {
    def : Pat<(fvti.Vector (any_riscv_fpround_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, fvti.Log2SEW, TA_MA)>;

    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// 14.
Vector Reduction Operations 2672 2673// 14.1. Vector Single-Width Integer Reduction Instructions 2674defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", is_float=0>; 2675defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", is_float=0>; 2676defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", is_float=0>; 2677defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", is_float=0>; 2678defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", is_float=0>; 2679defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", is_float=0>; 2680defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", is_float=0>; 2681defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", is_float=0>; 2682 2683// 14.2. Vector Widening Integer Reduction Instructions 2684defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", is_float=0>; 2685defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", is_float=0>; 2686defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", is_float=0>; 2687defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", is_float=0>; 2688defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", is_float=0>; 2689 2690// 14.3. Vector Single-Width Floating-Point Reduction Instructions 2691defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", is_float=1>; 2692defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", is_float=1>; 2693defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", is_float=1>; 2694defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", is_float=1>; 2695 2696// 14.4. Vector Widening Floating-Point Reduction Instructions 2697defm : VPatWidenReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse, 2698 "PseudoVFWREDOSUM", is_float=1>; 2699defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl, 2700 riscv_fpextend_vl_oneuse, 2701 "PseudoVFWREDOSUM", is_float=1>; 2702defm : VPatWidenReductionVL_RM<rvv_vecreduce_FADD_vl, fpext_oneuse, 2703 "PseudoVFWREDUSUM", is_float=1>; 2704defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl, 2705 riscv_fpextend_vl_oneuse, 2706 "PseudoVFWREDUSUM", is_float=1>; 2707 2708// 15. 
foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    // 15.1 Vector Mask-Register Logical Instructions
    def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                        (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                       (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    // XOR is associative so we need 2 patterns for VMXNOR.
    def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                        VLOpFrag),
                                        VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                       VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Match the not idiom to the vmnot.m pseudo.
    def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

    // 15.2 Vector count population in mask vcpop.m
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

    // 15.3 vfirst find-first-set mask bit
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}
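// Note on the first VMXNOR pattern above: it handles the form that can arise
// when a NOT has been folded into one XOR operand before selection, since
// not(a) ^ b == (a ^ allones) ^ b == (a ^ b) ^ allones == not(a ^ b).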
// 16. Vector Permutation Instructions

// 16.1. Integer Scalar Move Instructions
// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                            vti.ScalarRegClass:$rs1,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                  vti.RegClass:$merge,
                  (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                                vti.RegClass:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                                uimm5:$imm,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // emul = lmul * 16 / sew
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
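// Worked example for the emul computation above (illustrative): for
// vti = VI32M2 (SEW=32, LMUL=2), octuple_lmul is 16, so
// octuple_emul = (16 * 16) >> Log2SEW(5) = 8, i.e. EMUL=1, giving
// ivti = VI16M1 and inst = "PseudoVRGATHEREI16_VV_M2_E32_M1". The 1..64
// octuple bound keeps the index EMUL within the legal MF8..M8 range.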
// 16.2. Floating-Point Scalar Move Instructions
foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (fpimm0)),
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                  vti.RegClass:$merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                  vti.RegClass:$merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             vti.ScalarRegClass:$rs1,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                  vti.RegClass:$merge,
                  (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  }
  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<ivti>.Predicates) in {
    def : Pat<(vti.Vector
               (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                     (ivti.Vector vti.RegClass:$rs1),
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector
               (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                     uimm5:$imm,
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;
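// The slide type profiles below can be read as follows (operand roles are
// assumed from how these nodes are constructed, not stated in the profiles
// themselves): SDTRVVSlide is (merge, src, offset, mask, vl, policy); the
// slide1 variants drop the policy operand and replace the offset with the
// inserted scalar, an XLenVT GPR for SDTRVVSlide1 and an element-typed FP
// scalar for SDTRVVFSlide1.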
def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;
def SDTRVVFSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>,
  SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_slideup_vl     : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl    : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
def riscv_slidedown_vl   : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
def riscv_slide1down_vl  : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;
def riscv_fslide1up_vl   : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>;
def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>;

foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                  TAIL_AGNOSTIC)>;
  }
}

defm : VPatSlideVL_VX_VI<riscv_slideup_vl, "PseudoVSLIDEUP">;
defm : VPatSlideVL_VX_VI<riscv_slidedown_vl, "PseudoVSLIDEDOWN">;
defm : VPatSlide1VL_VX<riscv_slide1up_vl, "PseudoVSLIDE1UP">;
defm : VPatSlide1VL_VF<riscv_fslide1up_vl, "PseudoVFSLIDE1UP">;
defm : VPatSlide1VL_VX<riscv_slide1down_vl, "PseudoVSLIDE1DOWN">;
defm : VPatSlide1VL_VF<riscv_fslide1down_vl, "PseudoVFSLIDE1DOWN">;