1//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file contains the required infrastructure and VL patterns to 10/// support code generation for the standard 'V' (Vector) extension, version 11/// version 1.0. 12/// 13/// This file is included from and depends upon RISCVInstrInfoVPseudos.td 14/// 15/// Note: the patterns for RVV intrinsics are found in 16/// RISCVInstrInfoVPseudos.td. 17/// 18//===----------------------------------------------------------------------===// 19 20//===----------------------------------------------------------------------===// 21// Helpers to define the VL patterns. 22//===----------------------------------------------------------------------===// 23 24def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, 25 SDTCisSameAs<0, 2>, 26 SDTCisVec<0>, SDTCisInt<0>, 27 SDTCVecEltisVT<3, i1>, 28 SDTCisSameNumEltsAs<0, 3>, 29 SDTCisVT<4, XLenVT>]>; 30 31def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 32 SDTCisSameAs<0, 2>, 33 SDTCisVec<0>, SDTCisInt<0>, 34 SDTCisSameAs<0, 3>, 35 SDTCVecEltisVT<4, i1>, 36 SDTCisSameNumEltsAs<0, 4>, 37 SDTCisVT<5, XLenVT>]>; 38 39// Input: (vector, vector/scalar, merge, mask, roundmode, vl) 40def SDT_RISCVVNBinOp_RM_VL : SDTypeProfile<1, 6, [SDTCisVec<0>, SDTCisInt<0>, 41 SDTCisSameAs<0, 3>, 42 SDTCisSameNumEltsAs<0, 1>, 43 SDTCisVec<1>, 44 SDTCisOpSmallerThanOp<2, 1>, 45 SDTCisSameAs<0, 2>, 46 SDTCisSameNumEltsAs<0, 4>, 47 SDTCVecEltisVT<4, i1>, 48 SDTCisVT<5, XLenVT>, 49 SDTCisVT<6, XLenVT>]>; 50 51def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 52 SDTCisVec<0>, SDTCisFP<0>, 53 SDTCVecEltisVT<2, i1>, 54 SDTCisSameNumEltsAs<0, 2>, 55 SDTCisVT<3, XLenVT>]>; 56def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 57 SDTCisSameAs<0, 2>, 58 SDTCisVec<0>, SDTCisFP<0>, 59 SDTCisSameAs<0, 3>, 60 SDTCVecEltisVT<4, i1>, 61 SDTCisSameNumEltsAs<0, 4>, 62 SDTCisVT<5, XLenVT>]>; 63 64def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 65 SDTCisSameAs<0, 2>, 66 SDTCisVec<0>, SDTCisFP<0>, 67 SDTCisSameAs<0, 3>, 68 SDTCVecEltisVT<4, i1>, 69 SDTCisSameNumEltsAs<0, 4>, 70 SDTCisVT<5, XLenVT>]>; 71 72def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL", 73 SDTypeProfile<1, 3, [SDTCisVec<0>, 74 SDTCisSameAs<0, 1>, 75 SDTCisSameAs<0, 2>, 76 SDTCisVT<3, XLenVT>]>>; 77def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL", 78 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, 79 SDTCisSameAs<0, 1>, 80 SDTCisVT<2, XLenVT>, 81 SDTCisVT<3, XLenVT>]>>; 82def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL", 83 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, 84 SDTCisSameAs<0, 1>, 85 SDTCisEltOfVec<2, 0>, 86 SDTCisVT<3, XLenVT>]>>; 87def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL", 88 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 89 SDTCisInt<0>, 90 SDTCisVT<2, XLenVT>, 91 SDTCisVT<3, XLenVT>]>>; 92def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL", 93 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 94 SDTCisFP<0>, 95 SDTCisEltOfVec<2, 0>, 96 SDTCisVT<3, XLenVT>]>>; 97 98def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 99def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>; 100def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 101def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 102def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 103def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 104def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 105def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 106def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>; 107def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>; 108def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>; 109def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>; 110def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>; 111def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>; 112def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>; 113def riscv_rotl_vl : SDNode<"RISCVISD::ROTL_VL", SDT_RISCVIntBinOp_VL>; 114def riscv_rotr_vl : SDNode<"RISCVISD::ROTR_VL", SDT_RISCVIntBinOp_VL>; 115def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 116def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 117def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 118def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 119 120def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>; 121def riscv_bswap_vl : SDNode<"RISCVISD::BSWAP_VL", SDT_RISCVIntUnOp_VL>; 122def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>; 123def riscv_cttz_vl : SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>; 124def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>; 125 126def riscv_avgfloors_vl : SDNode<"RISCVISD::AVGFLOORS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 127def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 128def riscv_avgceils_vl : SDNode<"RISCVISD::AVGCEILS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 129def riscv_avgceilu_vl : SDNode<"RISCVISD::AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 130def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 131def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; 132def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; 133def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>; 134 135def riscv_vnclipu_vl : SDNode<"RISCVISD::VNCLIPU_VL", SDT_RISCVVNBinOp_RM_VL>; 136def riscv_vnclip_vl : SDNode<"RISCVISD::VNCLIP_VL", SDT_RISCVVNBinOp_RM_VL>; 137 138def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 139def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>; 140def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 141def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>; 142def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>; 143def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>; 144def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>; 145def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; 146def riscv_vfmin_vl : SDNode<"RISCVISD::VFMIN_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 147def riscv_vfmax_vl : SDNode<"RISCVISD::VFMAX_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; 148 149def riscv_strict_fadd_vl : SDNode<"RISCVISD::STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; 150def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; 151def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; 152def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; 153def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; 154 155def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 156 [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 157 (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 158def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 159 [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 160 (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 161def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 162 [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 163 (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 164def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 165 [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), 166 (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; 167def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 168 [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl), 169 (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>; 170 171def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL", 172 SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>, 173 SDTCisFP<1>, SDTCisVec<1>, 174 SDTCisSameSizeAs<0, 1>, 175 SDTCisSameNumEltsAs<0, 1>, 176 SDTCVecEltisVT<2, i1>, 177 SDTCisSameNumEltsAs<0, 2>, 178 SDTCisVT<3, XLenVT>]>>; 179 180def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, 181 SDTCisSameAs<0, 2>, 182 SDTCisSameAs<0, 3>, 183 SDTCisVec<0>, SDTCisFP<0>, 184 SDTCVecEltisVT<4, i1>, 185 SDTCisSameNumEltsAs<0, 4>, 186 SDTCisVT<5, XLenVT>]>; 187def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 188def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 189def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 190def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; 191 192def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 193 SDTCisVec<1>, SDTCisFP<1>, 194 SDTCisOpSmallerThanOp<1, 0>, 195 SDTCisSameNumEltsAs<0, 1>, 196 SDTCisSameAs<1, 2>, 197 SDTCisSameAs<0, 3>, 198 SDTCVecEltisVT<4, i1>, 199 SDTCisSameNumEltsAs<0, 4>, 200 SDTCisVT<5, XLenVT>]>; 201def riscv_vfwmadd_vl : SDNode<"RISCVISD::VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 202def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 203def riscv_vfwmsub_vl : SDNode<"RISCVISD::VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 204def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; 205 206def riscv_strict_vfmadd_vl : SDNode<"RISCVISD::STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 207def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 208def riscv_strict_vfmsub_vl : SDNode<"RISCVISD::STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 209def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; 210 211def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 212 [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 213 (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 214def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 215 [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 216 (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 217def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 218 [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 219 (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 220def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 221 [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), 222 (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; 223 224def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [ 225 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>, 226 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 227]>; 228def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [ 229 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, 230 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 231]>; 232 233def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; 234def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; 235def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; 236def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>; 237def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; 238def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>; 239 240def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 241 [(riscv_fpround_vl node:$src, node:$mask, node:$vl), 242 (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>; 243def any_riscv_fpextend_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 244 [(riscv_fpextend_vl node:$src, node:$mask, node:$vl), 245 (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>; 246def any_riscv_fncvt_rod_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 247 [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl), 248 (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>; 249 250def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [ 251 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 252 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 253]>; 254def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [ 255 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, 256 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 257 SDTCisVT<4, XLenVT> // Rounding mode 258]>; 259 260def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [ 261 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 262 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> 263]>; 264def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [ 265 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>, 266 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>, 267 SDTCisVT<4, XLenVT> // Rounding mode 268]>; 269 270def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [ 271 SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, 272 SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>, 273 SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>; 274 275// Float -> Int 276def riscv_vfcvt_xu_f_vl : SDNode<"RISCVISD::VFCVT_XU_F_VL", SDT_RISCVFP2IOp_VL>; 277def riscv_vfcvt_x_f_vl : SDNode<"RISCVISD::VFCVT_X_F_VL", SDT_RISCVFP2IOp_VL>; 278def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>; 279def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>; 280 281def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>; 282def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>; 283 284def riscv_strict_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>; 285def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; 286def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; 287 288def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm), 289 [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm), 290 (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>; 291def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 292 [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl), 293 (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>; 294def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 295 [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl), 296 (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>; 297 298// Int -> Float 299def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 300def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; 301def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>; 302def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>; 303 304def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; 305def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>; 306 307def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 308 [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl), 309 (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>; 310def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 311 [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl), 312 (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>; 313 314def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>; 315def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; 316 317def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), 318 [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl), 319 (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>; 320 321def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>; 322def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; 323def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; 324def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 325 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 326 (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>; 327def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 328 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl), 329 (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>; 330 331def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL", 332 SDTypeProfile<1, 5, [SDTCisVec<0>, 333 SDTCisSameAs<0, 1>, 334 SDTCisVT<2, XLenVT>, 335 SDTCisSameAs<0, 3>, 336 SDTCVecEltisVT<4, i1>, 337 SDTCisSameNumEltsAs<0, 4>, 338 SDTCisVT<5, XLenVT>]>>; 339def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL", 340 SDTypeProfile<1, 5, [SDTCisVec<0>, 341 SDTCisSameAs<0, 1>, 342 SDTCisInt<2>, 343 SDTCisSameNumEltsAs<0, 2>, 344 SDTCisSameSizeAs<0, 2>, 345 SDTCisSameAs<0, 3>, 346 SDTCVecEltisVT<4, i1>, 347 SDTCisSameNumEltsAs<0, 4>, 348 SDTCisVT<5, XLenVT>]>>; 349def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL", 350 SDTypeProfile<1, 5, [SDTCisVec<0>, 351 SDTCisSameAs<0, 1>, 352 SDTCisInt<2>, 353 SDTCVecEltisVT<2, i16>, 354 SDTCisSameNumEltsAs<0, 2>, 355 SDTCisSameAs<0, 3>, 356 SDTCVecEltisVT<4, i1>, 357 SDTCisSameNumEltsAs<0, 4>, 358 SDTCisVT<5, XLenVT>]>>; 359 360def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [ 361 SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>, 362 SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameAs<0, 4>, 363 SDTCisVT<5, XLenVT> 364]>; 365 366def riscv_vmerge_vl : SDNode<"RISCVISD::VMERGE_VL", SDT_RISCVVMERGE_VL>; 367 368def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, 369 SDTCisVT<1, XLenVT>]>; 370def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>; 371def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>; 372 373def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 374 SDTCisSameAs<0, 2>, 375 SDTCVecEltisVT<0, i1>, 376 SDTCisVT<3, XLenVT>]>; 377def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 378def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 379def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; 380 381def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>; 382 383def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), 384 (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; 385 386def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL", 387 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 388 SDTCisVec<1>, SDTCisInt<1>, 389 SDTCVecEltisVT<2, i1>, 390 SDTCisSameNumEltsAs<1, 2>, 391 SDTCisVT<3, XLenVT>]>>; 392 393def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL", 394 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, 395 SDTCisVec<1>, SDTCisInt<1>, 396 SDTCVecEltisVT<2, i1>, 397 SDTCisSameNumEltsAs<1, 2>, 398 SDTCisVT<3, XLenVT>]>>; 399 400def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, 401 SDTCisSameNumEltsAs<0, 1>, 402 SDTCisSameNumEltsAs<1, 2>, 403 SDTCVecEltisVT<2, i1>, 404 SDTCisVT<3, XLenVT>]>; 405def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>; 406def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>; 407def riscv_ext_vl : PatFrags<(ops node:$A, node:$B, node:$C), 408 [(riscv_sext_vl node:$A, node:$B, node:$C), 409 (riscv_zext_vl node:$A, node:$B, node:$C)]>; 410 411def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL", 412 SDTypeProfile<1, 3, [SDTCisVec<0>, 413 SDTCisSameNumEltsAs<0, 1>, 414 SDTCisSameNumEltsAs<0, 2>, 415 SDTCVecEltisVT<2, i1>, 416 SDTCisVT<3, XLenVT>]>>; 417 418def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 419 SDTCisInt<1>, 420 SDTCisSameNumEltsAs<0, 1>, 421 SDTCisOpSmallerThanOp<1, 0>, 422 SDTCisSameAs<1, 2>, 423 SDTCisSameAs<0, 3>, 424 SDTCisSameNumEltsAs<1, 4>, 425 SDTCVecEltisVT<4, i1>, 426 SDTCisVT<5, XLenVT>]>; 427def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 428def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 429def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>; 430def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 431def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>; 432def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWIntBinOp_VL, []>; 433def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWIntBinOp_VL, []>; 434def riscv_vwsll_vl : SDNode<"RISCVISD::VWSLL_VL", SDT_RISCVVWIntBinOp_VL, []>; 435 436def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 437 SDTCisInt<1>, 438 SDTCisSameNumEltsAs<0, 1>, 439 SDTCisOpSmallerThanOp<1, 0>, 440 SDTCisSameAs<1, 2>, 441 SDTCisSameAs<0, 3>, 442 SDTCisSameNumEltsAs<1, 4>, 443 SDTCVecEltisVT<4, i1>, 444 SDTCisVT<5, XLenVT>]>; 445def riscv_vwmacc_vl : SDNode<"RISCVISD::VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; 446def riscv_vwmaccu_vl : SDNode<"RISCVISD::VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; 447def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>; 448 449def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 450 SDTCisFP<1>, 451 SDTCisSameNumEltsAs<0, 1>, 452 SDTCisOpSmallerThanOp<1, 0>, 453 SDTCisSameAs<1, 2>, 454 SDTCisSameAs<0, 3>, 455 SDTCisSameNumEltsAs<1, 4>, 456 SDTCVecEltisVT<4, i1>, 457 SDTCisVT<5, XLenVT>]>; 458def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; 459def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; 460def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>; 461 462def SDT_RISCVVNIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 463 SDTCisInt<1>, 464 SDTCisSameNumEltsAs<0, 1>, 465 SDTCisOpSmallerThanOp<0, 1>, 466 SDTCisSameAs<0, 2>, 467 SDTCisSameAs<0, 3>, 468 SDTCisSameNumEltsAs<0, 4>, 469 SDTCVecEltisVT<4, i1>, 470 SDTCisVT<5, XLenVT>]>; 471def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNIntBinOp_VL>; 472 473def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, 474 SDTCisSameAs<0, 1>, 475 SDTCisInt<2>, 476 SDTCisSameNumEltsAs<1, 2>, 477 SDTCisOpSmallerThanOp<2, 1>, 478 SDTCisSameAs<0, 3>, 479 SDTCisSameNumEltsAs<1, 4>, 480 SDTCVecEltisVT<4, i1>, 481 SDTCisVT<5, XLenVT>]>; 482def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>; 483def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>; 484def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>; 485def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>; 486 487def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, 488 SDTCisSameAs<0, 1>, 489 SDTCisFP<2>, 490 SDTCisSameNumEltsAs<1, 2>, 491 SDTCisOpSmallerThanOp<2, 1>, 492 SDTCisSameAs<0, 3>, 493 SDTCisSameNumEltsAs<1, 4>, 494 SDTCVecEltisVT<4, i1>, 495 SDTCisVT<5, XLenVT>]>; 496 497def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>; 498def riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>; 499 500def SDTRVVVecReduce : SDTypeProfile<1, 6, [ 501 SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>, 502 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>, 503 SDTCisVT<6, XLenVT> 504]>; 505 506def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 507 node:$E), 508 (riscv_add_vl node:$A, node:$B, node:$C, 509 node:$D, node:$E), [{ 510 return N->hasOneUse(); 511}]>; 512 513def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 514 node:$E), 515 (riscv_sub_vl node:$A, node:$B, node:$C, 516 node:$D, node:$E), [{ 517 return N->hasOneUse(); 518}]>; 519 520def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 521 node:$E), 522 (riscv_mul_vl node:$A, node:$B, node:$C, 523 node:$D, node:$E), [{ 524 return N->hasOneUse(); 525}]>; 526 527def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 528 node:$E), 529 (riscv_vwmul_vl node:$A, node:$B, node:$C, 530 node:$D, node:$E), [{ 531 return N->hasOneUse(); 532}]>; 533 534def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 535 node:$E), 536 (riscv_vwmulu_vl node:$A, node:$B, node:$C, 537 node:$D, node:$E), [{ 538 return N->hasOneUse(); 539}]>; 540 541def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 542 node:$E), 543 (riscv_vwmulsu_vl node:$A, node:$B, node:$C, 544 node:$D, node:$E), [{ 545 return N->hasOneUse(); 546}]>; 547 548def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 549 (riscv_sext_vl node:$A, node:$B, node:$C), [{ 550 return N->hasOneUse(); 551}]>; 552 553def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 554 (riscv_zext_vl node:$A, node:$B, node:$C), [{ 555 return N->hasOneUse(); 556}]>; 557 558def riscv_ext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 559 (riscv_ext_vl node:$A, node:$B, node:$C), [{ 560 return N->hasOneUse(); 561}]>; 562 563def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), 564 (riscv_fpextend_vl node:$A, node:$B, node:$C), [{ 565 return N->hasOneUse(); 566}]>; 567 568def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 569 node:$E), 570 (riscv_vfmadd_vl node:$A, node:$B, 571 node:$C, node:$D, node:$E), [{ 572 return N->hasOneUse(); 573}]>; 574 575def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 576 node:$E), 577 (riscv_vfnmadd_vl node:$A, node:$B, 578 node:$C, node:$D, node:$E), [{ 579 return N->hasOneUse(); 580}]>; 581 582def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 583 node:$E), 584 (riscv_vfmsub_vl node:$A, node:$B, 585 node:$C, node:$D, node:$E), [{ 586 return N->hasOneUse(); 587}]>; 588 589def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, 590 node:$E), 591 (riscv_vfnmsub_vl node:$A, node:$B, 592 node:$C, node:$D, node:$E), [{ 593 return N->hasOneUse(); 594}]>; 595 596foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR", 597 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in 598 def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>; 599 600// Give explicit Complexity to prefer simm5/uimm5. 601def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>; 602def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>; 603def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>; 604def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>; 605def SplatPat_simm5_plus1 606 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>; 607def SplatPat_simm5_plus1_nonzero 608 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>; 609 610// Selects extends or truncates of splats where we only care about the lowest 8 611// bits of each element. 612def Low8BitsSplatPat 613 : ComplexPattern<vAny, 1, "selectLow8BitsVSplat", [], [], 2>; 614 615// Ignore the vl operand on vmv_v_f, and vmv_s_f. 616def SplatFPOp : PatFrags<(ops node:$op), 617 [(riscv_vfmv_v_f_vl undef, node:$op, srcvalue), 618 (riscv_vfmv_s_f_vl undef, node:$op, srcvalue)]>; 619 620def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>; 621def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>; 622def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>; 623def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>; 624 625class VPatBinaryVL_V<SDPatternOperator vop, 626 string instruction_name, 627 string suffix, 628 ValueType result_type, 629 ValueType op1_type, 630 ValueType op2_type, 631 ValueType mask_type, 632 int log2sew, 633 LMULInfo vlmul, 634 VReg result_reg_class, 635 VReg op1_reg_class, 636 VReg op2_reg_class, 637 bit isSEWAware = 0> 638 : Pat<(result_type (vop 639 (op1_type op1_reg_class:$rs1), 640 (op2_type op2_reg_class:$rs2), 641 (result_type result_reg_class:$merge), 642 (mask_type V0), 643 VLOpFrag)), 644 (!cast<Instruction>( 645 !if(isSEWAware, 646 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 647 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) 648 result_reg_class:$merge, 649 op1_reg_class:$rs1, 650 op2_reg_class:$rs2, 651 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 652 653multiclass VPatBinaryRM_VL_V<SDNode vop, 654 string instruction_name, 655 string suffix, 656 ValueType result_type, 657 ValueType op1_type, 658 ValueType op2_type, 659 ValueType mask_type, 660 int sew, 661 LMULInfo vlmul, 662 VReg result_reg_class, 663 VReg op1_reg_class, 664 VReg op2_reg_class> { 665 def : Pat<(result_type (vop 666 (op1_type op1_reg_class:$rs1), 667 (op2_type op2_reg_class:$rs2), 668 (result_type result_reg_class:$merge), 669 (mask_type V0), 670 (XLenVT timm:$roundmode), 671 VLOpFrag)), 672 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK") 673 result_reg_class:$merge, 674 op1_reg_class:$rs1, 675 op2_reg_class:$rs2, 676 (mask_type V0), 677 (XLenVT timm:$roundmode), 678 GPR:$vl, sew, TAIL_AGNOSTIC)>; 679} 680 681class VPatBinaryVL_V_RM<SDPatternOperator vop, 682 string instruction_name, 683 string suffix, 684 ValueType result_type, 685 ValueType op1_type, 686 ValueType op2_type, 687 ValueType mask_type, 688 int log2sew, 689 LMULInfo vlmul, 690 VReg result_reg_class, 691 VReg op1_reg_class, 692 VReg op2_reg_class, 693 bit isSEWAware = 0> 694 : Pat<(result_type (vop 695 (op1_type op1_reg_class:$rs1), 696 (op2_type op2_reg_class:$rs2), 697 (result_type result_reg_class:$merge), 698 (mask_type V0), 699 VLOpFrag)), 700 (!cast<Instruction>( 701 !if(isSEWAware, 702 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 703 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) 704 result_reg_class:$merge, 705 op1_reg_class:$rs1, 706 op2_reg_class:$rs2, 707 (mask_type V0), 708 // Value to indicate no rounding mode change in 709 // RISCVInsertReadWriteCSR 710 FRM_DYN, 711 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 712 713multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop, 714 string instruction_name, 715 string suffix, 716 ValueType result_type, 717 ValueType op2_type, 718 int sew, 719 LMULInfo vlmul, 720 VReg result_reg_class, 721 VReg op2_reg_class> { 722 def : Pat<(result_type (vop 723 (result_type result_reg_class:$rs1), 724 (op2_type op2_reg_class:$rs2), 725 srcvalue, 726 true_mask, 727 VLOpFrag)), 728 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 729 result_reg_class:$rs1, 730 op2_reg_class:$rs2, 731 GPR:$vl, sew, TAIL_AGNOSTIC)>; 732 // Tail undisturbed 733 def : Pat<(riscv_vmerge_vl true_mask, 734 (result_type (vop 735 result_reg_class:$rs1, 736 (op2_type op2_reg_class:$rs2), 737 srcvalue, 738 true_mask, 739 VLOpFrag)), 740 result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag), 741 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") 742 result_reg_class:$rs1, 743 op2_reg_class:$rs2, 744 GPR:$vl, sew, TU_MU)>; 745} 746 747class VPatTiedBinaryMaskVL_V<SDNode vop, 748 string instruction_name, 749 string suffix, 750 ValueType result_type, 751 ValueType op2_type, 752 ValueType mask_type, 753 int sew, 754 LMULInfo vlmul, 755 VReg result_reg_class, 756 VReg op2_reg_class> : 757 Pat<(result_type (vop 758 (result_type result_reg_class:$rs1), 759 (op2_type op2_reg_class:$rs2), 760 (result_type result_reg_class:$rs1), 761 (mask_type V0), 762 VLOpFrag)), 763 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK_TIED") 764 result_reg_class:$rs1, 765 op2_reg_class:$rs2, 766 (mask_type V0), GPR:$vl, sew, TU_MU)>; 767 768multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop, 769 string instruction_name, 770 string suffix, 771 ValueType result_type, 772 ValueType op2_type, 773 int log2sew, 774 LMULInfo vlmul, 775 VReg result_reg_class, 776 VReg op2_reg_class, 777 bit isSEWAware = 0> { 778 defvar name = !if(isSEWAware, 779 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TIED", 780 instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED"); 781 def : Pat<(result_type (vop 782 (result_type result_reg_class:$rs1), 783 (op2_type op2_reg_class:$rs2), 784 srcvalue, 785 true_mask, 786 VLOpFrag)), 787 (!cast<Instruction>(name) 788 result_reg_class:$rs1, 789 op2_reg_class:$rs2, 790 // Value to indicate no rounding mode change in 791 // RISCVInsertReadWriteCSR 792 FRM_DYN, 793 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 794 // Tail undisturbed 795 def : Pat<(riscv_vmerge_vl true_mask, 796 (result_type (vop 797 result_reg_class:$rs1, 798 (op2_type op2_reg_class:$rs2), 799 srcvalue, 800 true_mask, 801 VLOpFrag)), 802 result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag), 803 (!cast<Instruction>(name) 804 result_reg_class:$rs1, 805 op2_reg_class:$rs2, 806 // Value to indicate no rounding mode change in 807 // RISCVInsertReadWriteCSR 808 FRM_DYN, 809 GPR:$vl, log2sew, TU_MU)>; 810} 811 812class VPatBinaryVL_XI<SDPatternOperator vop, 813 string instruction_name, 814 string suffix, 815 ValueType result_type, 816 ValueType vop1_type, 817 ValueType vop2_type, 818 ValueType mask_type, 819 int log2sew, 820 LMULInfo vlmul, 821 VReg result_reg_class, 822 VReg vop_reg_class, 823 ComplexPattern SplatPatKind, 824 DAGOperand xop_kind, 825 bit isSEWAware = 0> 826 : Pat<(result_type (vop 827 (vop1_type vop_reg_class:$rs1), 828 (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), 829 (result_type result_reg_class:$merge), 830 (mask_type V0), 831 VLOpFrag)), 832 (!cast<Instruction>( 833 !if(isSEWAware, 834 instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 835 instruction_name#_#suffix#_#vlmul.MX#"_MASK")) 836 result_reg_class:$merge, 837 vop_reg_class:$rs1, 838 xop_kind:$rs2, 839 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 840 841multiclass VPatBinaryRM_VL_XI<SDNode vop, 842 string instruction_name, 843 string suffix, 844 ValueType result_type, 845 ValueType vop1_type, 846 ValueType vop2_type, 847 ValueType mask_type, 848 int sew, 849 LMULInfo vlmul, 850 VReg result_reg_class, 851 VReg vop_reg_class, 852 ComplexPattern SplatPatKind, 853 DAGOperand xop_kind> { 854 def : Pat<(result_type (vop 855 (vop1_type vop_reg_class:$rs1), 856 (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), 857 (result_type result_reg_class:$merge), 858 (mask_type V0), 859 (XLenVT timm:$roundmode), 860 VLOpFrag)), 861 (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK") 862 result_reg_class:$merge, 863 vop_reg_class:$rs1, 864 xop_kind:$rs2, 865 (mask_type V0), 866 (XLenVT timm:$roundmode), 867 GPR:$vl, sew, TAIL_AGNOSTIC)>; 868} 869 870multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name, 871 list<VTypeInfo> vtilist = AllIntegerVectors, 872 bit isSEWAware = 0> { 873 foreach vti = vtilist in { 874 let Predicates = GetVTypePredicates<vti>.Predicates in { 875 def : VPatBinaryVL_V<vop, instruction_name, "VV", 876 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 877 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 878 vti.RegClass, isSEWAware>; 879 def : VPatBinaryVL_XI<vop, instruction_name, "VX", 880 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 881 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 882 SplatPat, GPR, isSEWAware>; 883 } 884 } 885} 886 887multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name, 888 Operand ImmType = simm5> 889 : VPatBinaryVL_VV_VX<vop, instruction_name> { 890 foreach vti = AllIntegerVectors in { 891 let Predicates = GetVTypePredicates<vti>.Predicates in 892 def : VPatBinaryVL_XI<vop, instruction_name, "VI", 893 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 894 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 895 !cast<ComplexPattern>(SplatPat#_#ImmType), 896 ImmType>; 897 } 898} 899 900multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> { 901 foreach VtiToWti = AllWidenableIntVectors in { 902 defvar vti = VtiToWti.Vti; 903 defvar wti = VtiToWti.Wti; 904 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 905 GetVTypePredicates<wti>.Predicates) in { 906 def : VPatBinaryVL_V<vop, instruction_name, "VV", 907 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 908 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 909 vti.RegClass>; 910 def : VPatBinaryVL_XI<vop, instruction_name, "VX", 911 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 912 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 913 SplatPat, GPR>; 914 } 915 } 916} 917 918multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w, 919 string instruction_name> 920 : VPatBinaryWVL_VV_VX<vop, instruction_name> { 921 foreach VtiToWti = AllWidenableIntVectors in { 922 defvar vti = VtiToWti.Vti; 923 defvar wti = VtiToWti.Wti; 924 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 925 GetVTypePredicates<wti>.Predicates) in { 926 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 927 wti.Vector, vti.Vector, vti.Log2SEW, 928 vti.LMul, wti.RegClass, vti.RegClass>; 929 def : VPatTiedBinaryMaskVL_V<vop_w, instruction_name, "WV", 930 wti.Vector, vti.Vector, wti.Mask, 931 vti.Log2SEW, vti.LMul, wti.RegClass, 932 vti.RegClass>; 933 def : VPatBinaryVL_V<vop_w, instruction_name, "WV", 934 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 935 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 936 vti.RegClass>; 937 def : VPatBinaryVL_XI<vop_w, instruction_name, "WX", 938 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 939 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 940 SplatPat, GPR>; 941 } 942 } 943} 944 945multiclass VPatBinaryNVL_WV_WX_WI<SDPatternOperator vop, string instruction_name> { 946 foreach VtiToWti = AllWidenableIntVectors in { 947 defvar vti = VtiToWti.Vti; 948 defvar wti = VtiToWti.Wti; 949 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 950 GetVTypePredicates<wti>.Predicates) in { 951 def : VPatBinaryVL_V<vop, instruction_name, "WV", 952 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 953 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 954 vti.RegClass>; 955 def : VPatBinaryVL_XI<vop, instruction_name, "WX", 956 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 957 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 958 SplatPat, GPR>; 959 def : VPatBinaryVL_XI<vop, instruction_name, "WI", 960 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 961 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 962 !cast<ComplexPattern>(SplatPat#_#uimm5), 963 uimm5>; 964 } 965 } 966} 967 968multiclass VPatBinaryRM_NVL_WV_WX_WI<SDNode vop, string instruction_name> { 969 foreach VtiToWti = AllWidenableIntVectors in { 970 defvar vti = VtiToWti.Vti; 971 defvar wti = VtiToWti.Wti; 972 defm : VPatBinaryRM_VL_V<vop, instruction_name, "WV", 973 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 974 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, vti.RegClass>; 975 defm : VPatBinaryRM_VL_XI<vop, instruction_name, "WX", 976 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 977 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, SplatPat, GPR>; 978 defm : VPatBinaryRM_VL_XI<vop, instruction_name, "WI", 979 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 980 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 981 !cast<ComplexPattern>(SplatPat#_#uimm5), 982 uimm5>; 983 } 984} 985 986class VPatBinaryVL_VF<SDPatternOperator vop, 987 string instruction_name, 988 ValueType result_type, 989 ValueType vop1_type, 990 ValueType vop2_type, 991 ValueType mask_type, 992 int log2sew, 993 LMULInfo vlmul, 994 VReg result_reg_class, 995 VReg vop_reg_class, 996 RegisterClass scalar_reg_class, 997 bit isSEWAware = 0> 998 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 999 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 1000 (result_type result_reg_class:$merge), 1001 (mask_type V0), 1002 VLOpFrag)), 1003 (!cast<Instruction>( 1004 !if(isSEWAware, 1005 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 1006 instruction_name#"_"#vlmul.MX#"_MASK")) 1007 result_reg_class:$merge, 1008 vop_reg_class:$rs1, 1009 scalar_reg_class:$rs2, 1010 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 1011 1012class VPatBinaryVL_VF_RM<SDPatternOperator vop, 1013 string instruction_name, 1014 ValueType result_type, 1015 ValueType vop1_type, 1016 ValueType vop2_type, 1017 ValueType mask_type, 1018 int log2sew, 1019 LMULInfo vlmul, 1020 VReg result_reg_class, 1021 VReg vop_reg_class, 1022 RegisterClass scalar_reg_class, 1023 bit isSEWAware = 0> 1024 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 1025 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 1026 (result_type result_reg_class:$merge), 1027 (mask_type V0), 1028 VLOpFrag)), 1029 (!cast<Instruction>( 1030 !if(isSEWAware, 1031 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 1032 instruction_name#"_"#vlmul.MX#"_MASK")) 1033 result_reg_class:$merge, 1034 vop_reg_class:$rs1, 1035 scalar_reg_class:$rs2, 1036 (mask_type V0), 1037 // Value to indicate no rounding mode change in 1038 // RISCVInsertReadWriteCSR 1039 FRM_DYN, 1040 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 1041 1042multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name, 1043 bit isSEWAware = 0> { 1044 foreach vti = AllFloatVectors in { 1045 let Predicates = GetVTypePredicates<vti>.Predicates in { 1046 def : VPatBinaryVL_V<vop, instruction_name, "VV", 1047 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 1048 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 1049 vti.RegClass, isSEWAware>; 1050 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 1051 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 1052 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 1053 vti.ScalarRegClass, isSEWAware>; 1054 } 1055 } 1056} 1057 1058multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name, 1059 bit isSEWAware = 0> { 1060 foreach vti = AllFloatVectors in { 1061 let Predicates = GetVTypePredicates<vti>.Predicates in { 1062 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 1063 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 1064 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 1065 vti.RegClass, isSEWAware>; 1066 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 1067 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 1068 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 1069 vti.ScalarRegClass, isSEWAware>; 1070 } 1071 } 1072} 1073 1074multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name, 1075 bit isSEWAware = 0> { 1076 foreach fvti = AllFloatVectors in { 1077 let Predicates = GetVTypePredicates<fvti>.Predicates in 1078 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 1079 fvti.RegClass:$rs1, 1080 (fvti.Vector fvti.RegClass:$merge), 1081 (fvti.Mask V0), 1082 VLOpFrag)), 1083 (!cast<Instruction>( 1084 !if(isSEWAware, 1085 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 1086 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 1087 fvti.RegClass:$merge, 1088 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 1089 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1090 } 1091} 1092 1093multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name, 1094 bit isSEWAware = 0> { 1095 foreach fvti = AllFloatVectors in { 1096 let Predicates = GetVTypePredicates<fvti>.Predicates in 1097 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 1098 fvti.RegClass:$rs1, 1099 (fvti.Vector fvti.RegClass:$merge), 1100 (fvti.Mask V0), 1101 VLOpFrag)), 1102 (!cast<Instruction>( 1103 !if(isSEWAware, 1104 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 1105 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 1106 fvti.RegClass:$merge, 1107 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 1108 (fvti.Mask V0), 1109 // Value to indicate no rounding mode change in 1110 // RISCVInsertReadWriteCSR 1111 FRM_DYN, 1112 GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1113 } 1114} 1115 1116multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name, 1117 CondCode cc> { 1118 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1119 vti.RegClass:$rs2, cc, 1120 VR:$merge, 1121 (vti.Mask V0), 1122 VLOpFrag)), 1123 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1124 VR:$merge, 1125 vti.RegClass:$rs1, 1126 vti.RegClass:$rs2, 1127 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1128} 1129 1130// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 1131multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name, 1132 CondCode cc, CondCode invcc> 1133 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> { 1134 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), 1135 vti.RegClass:$rs1, invcc, 1136 VR:$merge, 1137 (vti.Mask V0), 1138 VLOpFrag)), 1139 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1140 VR:$merge, vti.RegClass:$rs1, 1141 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1142} 1143 1144multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name, 1145 CondCode cc, CondCode invcc> { 1146 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); 1147 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1148 (SplatPat (XLenVT GPR:$rs2)), cc, 1149 VR:$merge, 1150 (vti.Mask V0), 1151 VLOpFrag)), 1152 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1153 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1154 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), 1155 (vti.Vector vti.RegClass:$rs1), invcc, 1156 VR:$merge, 1157 (vti.Mask V0), 1158 VLOpFrag)), 1159 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1160 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1161} 1162 1163multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name, 1164 CondCode cc, CondCode invcc> { 1165 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1166 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1167 (SplatPat_simm5 simm5:$rs2), cc, 1168 VR:$merge, 1169 (vti.Mask V0), 1170 VLOpFrag)), 1171 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1172 XLenVT:$rs2, (vti.Mask V0), GPR:$vl, 1173 vti.Log2SEW)>; 1174 1175 // FIXME: Can do some canonicalization to remove these patterns. 1176 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2), 1177 (vti.Vector vti.RegClass:$rs1), invcc, 1178 VR:$merge, 1179 (vti.Mask V0), 1180 VLOpFrag)), 1181 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1182 simm5:$rs2, (vti.Mask V0), GPR:$vl, 1183 vti.Log2SEW)>; 1184} 1185 1186multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti, 1187 string instruction_name, 1188 CondCode cc, CondCode invcc, 1189 ComplexPattern splatpat_kind> { 1190 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1191 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1192 (splatpat_kind simm5:$rs2), cc, 1193 VR:$merge, 1194 (vti.Mask V0), 1195 VLOpFrag)), 1196 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1197 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1198 vti.Log2SEW)>; 1199 1200 // FIXME: Can do some canonicalization to remove these patterns. 1201 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), 1202 (vti.Vector vti.RegClass:$rs1), invcc, 1203 VR:$merge, 1204 (vti.Mask V0), 1205 VLOpFrag)), 1206 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1207 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1208 vti.Log2SEW)>; 1209} 1210 1211multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc, 1212 string inst_name, 1213 string swapped_op_inst_name> { 1214 foreach fvti = AllFloatVectors in { 1215 let Predicates = GetVTypePredicates<fvti>.Predicates in { 1216 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1217 fvti.RegClass:$rs2, 1218 cc, 1219 VR:$merge, 1220 (fvti.Mask V0), 1221 VLOpFrag)), 1222 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") 1223 VR:$merge, fvti.RegClass:$rs1, 1224 fvti.RegClass:$rs2, (fvti.Mask V0), 1225 GPR:$vl, fvti.Log2SEW)>; 1226 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1227 (SplatFPOp fvti.ScalarRegClass:$rs2), 1228 cc, 1229 VR:$merge, 1230 (fvti.Mask V0), 1231 VLOpFrag)), 1232 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1233 VR:$merge, fvti.RegClass:$rs1, 1234 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1235 GPR:$vl, fvti.Log2SEW)>; 1236 def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 1237 (fvti.Vector fvti.RegClass:$rs1), 1238 cc, 1239 VR:$merge, 1240 (fvti.Mask V0), 1241 VLOpFrag)), 1242 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1243 VR:$merge, fvti.RegClass:$rs1, 1244 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1245 GPR:$vl, fvti.Log2SEW)>; 1246 } 1247 } 1248} 1249 1250multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix, 1251 list <VTypeInfoToFraction> fraction_list> { 1252 foreach vtiTofti = fraction_list in { 1253 defvar vti = vtiTofti.Vti; 1254 defvar fti = vtiTofti.Fti; 1255 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1256 GetVTypePredicates<fti>.Predicates) in 1257 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), 1258 (fti.Mask V0), VLOpFrag)), 1259 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") 1260 (vti.Vector (IMPLICIT_DEF)), 1261 fti.RegClass:$rs2, 1262 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1263 } 1264} 1265 1266// Single width converting 1267 1268multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1269 foreach fvti = AllFloatVectors in { 1270 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1271 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1272 GetVTypePredicates<ivti>.Predicates) in 1273 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1274 (fvti.Mask V0), 1275 VLOpFrag)), 1276 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1277 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1278 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>; 1279 } 1280} 1281 1282multiclass VPatConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1283 foreach fvti = AllFloatVectors in { 1284 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1285 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1286 GetVTypePredicates<ivti>.Predicates) in 1287 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1288 (fvti.Mask V0), 1289 VLOpFrag)), 1290 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1291 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1292 (fvti.Mask V0), 1293 // Value to indicate no rounding mode change in 1294 // RISCVInsertReadWriteCSR 1295 FRM_DYN, 1296 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1297 } 1298} 1299 1300 1301multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_name> { 1302 foreach fvti = AllFloatVectors in { 1303 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1304 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1305 GetVTypePredicates<ivti>.Predicates) in 1306 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1307 (fvti.Mask V0), (XLenVT timm:$frm), 1308 VLOpFrag)), 1309 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1310 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1311 (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, 1312 TA_MA)>; 1313 } 1314} 1315 1316multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name> { 1317 foreach fvti = AllFloatVectors in { 1318 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1319 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1320 GetVTypePredicates<ivti>.Predicates) in 1321 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1322 (ivti.Mask V0), 1323 VLOpFrag)), 1324 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1325 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1326 (ivti.Mask V0), 1327 // Value to indicate no rounding mode change in 1328 // RISCVInsertReadWriteCSR 1329 FRM_DYN, 1330 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1331 } 1332} 1333 1334multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 1335 foreach fvti = AllFloatVectors in { 1336 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1337 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1338 GetVTypePredicates<ivti>.Predicates) in 1339 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1340 (ivti.Mask V0), (XLenVT timm:$frm), 1341 VLOpFrag)), 1342 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1343 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1344 (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1345 } 1346} 1347 1348// Widening converting 1349 1350multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1351 foreach fvtiToFWti = AllWidenableFloatVectors in { 1352 defvar fvti = fvtiToFWti.Vti; 1353 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1354 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1355 GetVTypePredicates<iwti>.Predicates) in 1356 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1357 (fvti.Mask V0), 1358 VLOpFrag)), 1359 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1360 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1361 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 1362 } 1363} 1364 1365multiclass VPatWConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1366 foreach fvtiToFWti = AllWidenableFloatVectors in { 1367 defvar fvti = fvtiToFWti.Vti; 1368 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1369 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1370 GetVTypePredicates<iwti>.Predicates) in 1371 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1372 (fvti.Mask V0), 1373 VLOpFrag)), 1374 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1375 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1376 (fvti.Mask V0), 1377 // Value to indicate no rounding mode change in 1378 // RISCVInsertReadWriteCSR 1379 FRM_DYN, 1380 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1381 } 1382} 1383 1384 1385multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> { 1386 foreach fvtiToFWti = AllWidenableFloatVectors in { 1387 defvar fvti = fvtiToFWti.Vti; 1388 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1389 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1390 GetVTypePredicates<iwti>.Predicates) in 1391 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1392 (fvti.Mask V0), (XLenVT timm:$frm), 1393 VLOpFrag)), 1394 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1395 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1396 (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1397 } 1398} 1399 1400multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop, 1401 string instruction_name> { 1402 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1403 defvar ivti = vtiToWti.Vti; 1404 defvar fwti = vtiToWti.Wti; 1405 let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates, 1406 GetVTypePredicates<fwti>.Predicates) in 1407 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1408 (ivti.Mask V0), 1409 VLOpFrag)), 1410 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW#"_MASK") 1411 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1412 (ivti.Mask V0), 1413 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1414 } 1415} 1416 1417// Narrowing converting 1418 1419multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop, 1420 string instruction_name> { 1421 // Reuse the same list of types used in the widening nodes, but just swap the 1422 // direction of types around so we're converting from Wti -> Vti 1423 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1424 defvar vti = vtiToWti.Vti; 1425 defvar fwti = vtiToWti.Wti; 1426 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1427 GetVTypePredicates<fwti>.Predicates) in 1428 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1429 (fwti.Mask V0), 1430 VLOpFrag)), 1431 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1432 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1433 (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1434 } 1435} 1436 1437multiclass VPatNConvertFP2IVL_W_RM<SDPatternOperator vop, 1438 string instruction_name> { 1439 // Reuse the same list of types used in the widening nodes, but just swap the 1440 // direction of types around so we're converting from Wti -> Vti 1441 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1442 defvar vti = vtiToWti.Vti; 1443 defvar fwti = vtiToWti.Wti; 1444 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1445 GetVTypePredicates<fwti>.Predicates) in 1446 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1447 (fwti.Mask V0), 1448 VLOpFrag)), 1449 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1450 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1451 (fwti.Mask V0), 1452 // Value to indicate no rounding mode change in 1453 // RISCVInsertReadWriteCSR 1454 FRM_DYN, 1455 GPR:$vl, vti.Log2SEW, TA_MA)>; 1456 } 1457} 1458 1459multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> { 1460 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1461 defvar vti = vtiToWti.Vti; 1462 defvar fwti = vtiToWti.Wti; 1463 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1464 GetVTypePredicates<fwti>.Predicates) in 1465 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1466 (fwti.Mask V0), (XLenVT timm:$frm), 1467 VLOpFrag)), 1468 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1469 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1470 (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>; 1471 } 1472} 1473 1474multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop, 1475 string instruction_name> { 1476 foreach fvtiToFWti = AllWidenableFloatVectors in { 1477 defvar fvti = fvtiToFWti.Vti; 1478 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1479 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1480 GetVTypePredicates<iwti>.Predicates) in 1481 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1482 (iwti.Mask V0), 1483 VLOpFrag)), 1484 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1485 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1486 (iwti.Mask V0), 1487 // Value to indicate no rounding mode change in 1488 // RISCVInsertReadWriteCSR 1489 FRM_DYN, 1490 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1491 } 1492} 1493 1494multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> { 1495 foreach fvtiToFWti = AllWidenableFloatVectors in { 1496 defvar fvti = fvtiToFWti.Vti; 1497 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1498 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1499 GetVTypePredicates<iwti>.Predicates) in 1500 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1501 (iwti.Mask V0), (XLenVT timm:$frm), 1502 VLOpFrag)), 1503 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1504 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1505 (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1506 } 1507} 1508 1509multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> { 1510 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1511 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1512 let Predicates = GetVTypePredicates<vti>.Predicates in { 1513 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1514 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1515 (vti.Mask V0), VLOpFrag, 1516 (XLenVT timm:$policy))), 1517 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1518 (vti_m1.Vector VR:$merge), 1519 (vti.Vector vti.RegClass:$rs1), 1520 (vti_m1.Vector VR:$rs2), 1521 (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1522 } 1523 } 1524} 1525 1526multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float> { 1527 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1528 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1529 let Predicates = GetVTypePredicates<vti>.Predicates in { 1530 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1531 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1532 (vti.Mask V0), VLOpFrag, 1533 (XLenVT timm:$policy))), 1534 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1535 (vti_m1.Vector VR:$merge), 1536 (vti.Vector vti.RegClass:$rs1), 1537 (vti_m1.Vector VR:$rs2), 1538 (vti.Mask V0), 1539 // Value to indicate no rounding mode change in 1540 // RISCVInsertReadWriteCSR 1541 FRM_DYN, 1542 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1543 } 1544 } 1545} 1546 1547multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> { 1548 foreach vtiToWti = AllWidenableIntVectors in { 1549 defvar vti = vtiToWti.Vti; 1550 defvar wti = vtiToWti.Wti; 1551 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1552 GetVTypePredicates<wti>.Predicates) in { 1553 def : Pat< 1554 (vti.Vector 1555 (riscv_trunc_vector_vl 1556 (op (wti.Vector wti.RegClass:$rs2), 1557 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))), 1558 (vti.Mask true_mask), 1559 VLOpFrag)), 1560 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX) 1561 (vti.Vector (IMPLICIT_DEF)), 1562 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1563 1564 def : Pat< 1565 (vti.Vector 1566 (riscv_trunc_vector_vl 1567 (op (wti.Vector wti.RegClass:$rs2), 1568 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))), 1569 (vti.Mask true_mask), 1570 VLOpFrag)), 1571 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1572 (vti.Vector (IMPLICIT_DEF)), 1573 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1574 1575 def : Pat< 1576 (vti.Vector 1577 (riscv_trunc_vector_vl 1578 (op (wti.Vector wti.RegClass:$rs2), 1579 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), 1580 VLOpFrag)), 1581 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1582 (vti.Vector (IMPLICIT_DEF)), 1583 wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1584 } 1585 } 1586} 1587 1588multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1589 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1590 defvar vti = vtiToWti.Vti; 1591 defvar wti = vtiToWti.Wti; 1592 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1593 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1594 GetVTypePredicates<wti>.Predicates) in { 1595 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1596 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1597 VR:$rs2, (vti.Mask V0), VLOpFrag, 1598 (XLenVT timm:$policy))), 1599 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1600 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1601 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1602 (XLenVT timm:$policy))>; 1603 } 1604 } 1605} 1606 1607multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1608 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1609 defvar vti = vtiToWti.Vti; 1610 defvar wti = vtiToWti.Wti; 1611 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1612 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1613 GetVTypePredicates<wti>.Predicates) in { 1614 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1615 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1616 VR:$rs2, (vti.Mask V0), VLOpFrag, 1617 (XLenVT timm:$policy))), 1618 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1619 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1620 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1621 // Value to indicate no rounding mode change in 1622 // RISCVInsertReadWriteCSR 1623 FRM_DYN, 1624 GPR:$vl, vti.Log2SEW, 1625 (XLenVT timm:$policy))>; 1626 } 1627 } 1628} 1629 1630multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1631 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1632 defvar vti = vtiToWti.Vti; 1633 defvar wti = vtiToWti.Wti; 1634 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1635 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1636 GetVTypePredicates<wti>.Predicates) in { 1637 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1638 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1639 VR:$rs2, (vti.Mask V0), VLOpFrag, 1640 (XLenVT timm:$policy))), 1641 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1642 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1643 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1644 (XLenVT timm:$policy))>; 1645 } 1646 } 1647} 1648 1649multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1650 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1651 defvar vti = vtiToWti.Vti; 1652 defvar wti = vtiToWti.Wti; 1653 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1654 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1655 GetVTypePredicates<wti>.Predicates) in { 1656 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1657 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1658 VR:$rs2, (vti.Mask V0), VLOpFrag, 1659 (XLenVT timm:$policy))), 1660 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1661 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1662 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1663 // Value to indicate no rounding mode change in 1664 // RISCVInsertReadWriteCSR 1665 FRM_DYN, 1666 GPR:$vl, vti.Log2SEW, 1667 (XLenVT timm:$policy))>; 1668 } 1669 } 1670} 1671 1672multiclass VPatBinaryFPWVL_VV_VF<SDNode vop, string instruction_name> { 1673 foreach fvtiToFWti = AllWidenableFloatVectors in { 1674 defvar vti = fvtiToFWti.Vti; 1675 defvar wti = fvtiToFWti.Wti; 1676 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1677 GetVTypePredicates<wti>.Predicates) in { 1678 def : VPatBinaryVL_V<vop, instruction_name, "VV", 1679 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1680 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1681 vti.RegClass>; 1682 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 1683 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1684 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1685 vti.ScalarRegClass>; 1686 } 1687 } 1688} 1689 1690multiclass VPatBinaryFPWVL_VV_VF_RM<SDNode vop, string instruction_name, 1691 bit isSEWAware = 0> { 1692 foreach fvtiToFWti = AllWidenableFloatVectors in { 1693 defvar vti = fvtiToFWti.Vti; 1694 defvar wti = fvtiToFWti.Wti; 1695 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1696 GetVTypePredicates<wti>.Predicates) in { 1697 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 1698 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1699 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1700 vti.RegClass, isSEWAware>; 1701 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 1702 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1703 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1704 vti.ScalarRegClass, isSEWAware>; 1705 } 1706 } 1707} 1708 1709multiclass VPatBinaryFPWVL_VV_VF_WV_WF<SDNode vop, SDNode vop_w, string instruction_name> 1710 : VPatBinaryFPWVL_VV_VF<vop, instruction_name> { 1711 foreach fvtiToFWti = AllWidenableFloatVectors in { 1712 defvar vti = fvtiToFWti.Vti; 1713 defvar wti = fvtiToFWti.Wti; 1714 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1715 GetVTypePredicates<wti>.Predicates) in { 1716 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 1717 wti.Vector, vti.Vector, vti.Log2SEW, 1718 vti.LMul, wti.RegClass, vti.RegClass>; 1719 def : VPatBinaryVL_V<vop_w, instruction_name, "WV", 1720 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1721 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1722 vti.RegClass>; 1723 def : VPatBinaryVL_VF<vop_w, instruction_name#"_W"#vti.ScalarSuffix, 1724 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1725 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1726 vti.ScalarRegClass>; 1727 } 1728 } 1729} 1730 1731multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM< 1732 SDNode vop, SDNode vop_w, string instruction_name, bit isSEWAware = 0> 1733 : VPatBinaryFPWVL_VV_VF_RM<vop, instruction_name, isSEWAware> { 1734 foreach fvtiToFWti = AllWidenableFloatVectors in { 1735 defvar vti = fvtiToFWti.Vti; 1736 defvar wti = fvtiToFWti.Wti; 1737 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1738 GetVTypePredicates<wti>.Predicates) in { 1739 defm : VPatTiedBinaryNoMaskVL_V_RM<vop_w, instruction_name, "WV", 1740 wti.Vector, vti.Vector, vti.Log2SEW, 1741 vti.LMul, wti.RegClass, vti.RegClass, 1742 isSEWAware>; 1743 def : VPatBinaryVL_V_RM<vop_w, instruction_name, "WV", 1744 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1745 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1746 vti.RegClass, isSEWAware>; 1747 def : VPatBinaryVL_VF_RM<vop_w, instruction_name#"_W"#vti.ScalarSuffix, 1748 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 1749 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 1750 vti.ScalarRegClass, isSEWAware>; 1751 } 1752 } 1753} 1754 1755multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> { 1756 foreach vtiToWti = AllWidenableIntVectors in { 1757 defvar vti = vtiToWti.Vti; 1758 defvar wti = vtiToWti.Wti; 1759 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1760 GetVTypePredicates<wti>.Predicates) in 1761 def : Pat< 1762 (vti.Vector 1763 (riscv_trunc_vector_vl 1764 (op (wti.Vector wti.RegClass:$rs2), 1765 (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))), 1766 (vti.Mask true_mask), VLOpFrag)), 1767 srcvalue, (wti.Mask true_mask), VLOpFrag), 1768 (vti.Mask true_mask), VLOpFrag)), 1769 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1770 (vti.Vector (IMPLICIT_DEF)), 1771 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1772 } 1773} 1774 1775multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction_name> { 1776 foreach vtiToWti = AllWidenableIntVectors in { 1777 defvar vti = vtiToWti.Vti; 1778 defvar wti = vtiToWti.Wti; 1779 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1780 GetVTypePredicates<wti>.Predicates) in 1781 def : Pat< 1782 (vti.Vector 1783 (riscv_trunc_vector_vl 1784 (op (wti.Vector wti.RegClass:$rs2), 1785 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), 1786 (vti.Mask true_mask), VLOpFrag)), 1787 srcvalue, (vti.Mask true_mask), VLOpFrag), 1788 (vti.Mask V0), VLOpFrag)), 1789 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK") 1790 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1, 1791 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1792 } 1793} 1794 1795multiclass VPatNarrowShiftVL_WV<SDNode op, string instruction_name> { 1796 defm : VPatNarrowShiftExtVL_WV<op, riscv_sext_vl_oneuse, instruction_name>; 1797 defm : VPatNarrowShiftExtVL_WV<op, riscv_zext_vl_oneuse, instruction_name>; 1798} 1799 1800multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> { 1801 foreach vti = AllIntegerVectors in { 1802 defvar suffix = vti.LMul.MX; 1803 let Predicates = GetVTypePredicates<vti>.Predicates in { 1804 // NOTE: We choose VMADD because it has the most commuting freedom. So it 1805 // works best with how TwoAddressInstructionPass tries commuting. 1806 def : Pat<(vti.Vector 1807 (op vti.RegClass:$rs2, 1808 (riscv_mul_vl_oneuse vti.RegClass:$rs1, 1809 vti.RegClass:$rd, 1810 srcvalue, (vti.Mask true_mask), VLOpFrag), 1811 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1812 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 1813 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1814 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1815 // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally 1816 // commutable. 1817 def : Pat<(vti.Vector 1818 (op vti.RegClass:$rs2, 1819 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), 1820 vti.RegClass:$rd, 1821 srcvalue, (vti.Mask true_mask), VLOpFrag), 1822 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1823 (!cast<Instruction>(instruction_name#"_VX_" # suffix) 1824 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1825 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1826 } 1827 } 1828} 1829 1830multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> { 1831 foreach vti = AllIntegerVectors in { 1832 defvar suffix = vti.LMul.MX; 1833 let Predicates = GetVTypePredicates<vti>.Predicates in { 1834 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1835 (vti.Vector (op vti.RegClass:$rd, 1836 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1837 srcvalue, (vti.Mask true_mask), VLOpFrag), 1838 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1839 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1840 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1841 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1842 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1843 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1844 (vti.Vector (op vti.RegClass:$rd, 1845 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1846 srcvalue, (vti.Mask true_mask), VLOpFrag), 1847 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1848 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1849 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1850 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1851 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1852 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1853 (vti.Vector (op vti.RegClass:$rd, 1854 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2, 1855 srcvalue, (vti.Mask true_mask), VLOpFrag), 1856 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1857 vti.RegClass:$rd, undef, VLOpFrag), 1858 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1859 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1860 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1861 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1862 (vti.Vector (op vti.RegClass:$rd, 1863 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, 1864 srcvalue, (vti.Mask true_mask), VLOpFrag), 1865 srcvalue, (vti.Mask true_mask), VLOpFrag)), 1866 vti.RegClass:$rd, undef, VLOpFrag), 1867 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK") 1868 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1869 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1870 } 1871 } 1872} 1873 1874multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> { 1875 foreach vtiTowti = AllWidenableIntVectors in { 1876 defvar vti = vtiTowti.Vti; 1877 defvar wti = vtiTowti.Wti; 1878 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1879 GetVTypePredicates<wti>.Predicates) in { 1880 def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1), 1881 (vti.Vector vti.RegClass:$rs2), 1882 (wti.Vector wti.RegClass:$rd), 1883 (vti.Mask V0), VLOpFrag), 1884 (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK") 1885 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1886 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1887 def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1), 1888 (vti.Vector vti.RegClass:$rs2), 1889 (wti.Vector wti.RegClass:$rd), 1890 (vti.Mask V0), VLOpFrag), 1891 (!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK") 1892 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, 1893 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1894 TAIL_AGNOSTIC)>; 1895 } 1896 } 1897} 1898 1899multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> { 1900 foreach vtiTowti = AllWidenableIntVectors in { 1901 defvar vti = vtiTowti.Vti; 1902 defvar wti = vtiTowti.Wti; 1903 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1904 GetVTypePredicates<wti>.Predicates) in { 1905 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1906 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2), 1907 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1908 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1909 (vti.Vector (IMPLICIT_DEF)), 1910 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; 1911 def : Pat<(vti.Vector (riscv_trunc_vector_vl 1912 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2), 1913 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), 1914 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1915 (vti.Vector (IMPLICIT_DEF)), 1916 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; 1917 } 1918 } 1919} 1920 1921multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name> { 1922 foreach vti = AllFloatVectors in { 1923 defvar suffix = vti.LMul.MX; 1924 let Predicates = GetVTypePredicates<vti>.Predicates in { 1925 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1926 vti.RegClass:$rs2, (vti.Mask V0), 1927 VLOpFrag)), 1928 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1929 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1930 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1931 1932 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1933 vti.RegClass:$rd, vti.RegClass:$rs2, 1934 (vti.Mask V0), 1935 VLOpFrag)), 1936 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1937 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1938 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1939 } 1940 } 1941} 1942 1943multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_name> { 1944 foreach vti = AllFloatVectors in { 1945 defvar suffix = vti.LMul.MX # "_E" # vti.SEW; 1946 let Predicates = GetVTypePredicates<vti>.Predicates in { 1947 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, 1948 vti.RegClass:$rs2, (vti.Mask V0), 1949 VLOpFrag)), 1950 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1951 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1952 (vti.Mask V0), 1953 // Value to indicate no rounding mode change in 1954 // RISCVInsertReadWriteCSR 1955 FRM_DYN, 1956 GPR:$vl, vti.Log2SEW, TA_MA)>; 1957 1958 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), 1959 vti.RegClass:$rd, vti.RegClass:$rs2, 1960 (vti.Mask V0), 1961 VLOpFrag)), 1962 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1963 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1964 (vti.Mask V0), 1965 // Value to indicate no rounding mode change in 1966 // RISCVInsertReadWriteCSR 1967 FRM_DYN, 1968 GPR:$vl, vti.Log2SEW, TA_MA)>; 1969 } 1970 } 1971} 1972 1973multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> { 1974 foreach vti = AllFloatVectors in { 1975 defvar suffix = vti.LMul.MX; 1976 let Predicates = GetVTypePredicates<vti>.Predicates in { 1977 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1978 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1979 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1980 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1981 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1982 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1983 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1984 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1985 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 1986 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1987 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 1988 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 1989 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 1990 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 1991 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1992 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 1993 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 1994 vti.RegClass:$rd, undef, VLOpFrag), 1995 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 1996 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 1997 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 1998 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 1999 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 2000 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 2001 vti.RegClass:$rd, undef, VLOpFrag), 2002 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 2003 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 2004 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2005 } 2006 } 2007} 2008 2009multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> { 2010 foreach vti = AllFloatVectors in { 2011 defvar suffix = vti.LMul.MX # "_E" # vti.SEW; 2012 let Predicates = GetVTypePredicates<vti>.Predicates in { 2013 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 2014 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 2015 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 2016 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 2017 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 2018 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 2019 (vti.Mask V0), 2020 // Value to indicate no rounding mode change in 2021 // RISCVInsertReadWriteCSR 2022 FRM_DYN, 2023 GPR:$vl, vti.Log2SEW, TU_MU)>; 2024 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 2025 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 2026 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 2027 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag), 2028 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 2029 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 2030 (vti.Mask V0), 2031 // Value to indicate no rounding mode change in 2032 // RISCVInsertReadWriteCSR 2033 FRM_DYN, 2034 GPR:$vl, vti.Log2SEW, TU_MU)>; 2035 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 2036 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, 2037 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 2038 vti.RegClass:$rd, undef, VLOpFrag), 2039 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK") 2040 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 2041 (vti.Mask V0), 2042 // Value to indicate no rounding mode change in 2043 // RISCVInsertReadWriteCSR 2044 FRM_DYN, 2045 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2046 def : Pat<(riscv_vmerge_vl (vti.Mask V0), 2047 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, 2048 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), 2049 vti.RegClass:$rd, undef, VLOpFrag), 2050 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") 2051 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 2052 (vti.Mask V0), 2053 // Value to indicate no rounding mode change in 2054 // RISCVInsertReadWriteCSR 2055 FRM_DYN, 2056 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2057 } 2058 } 2059} 2060 2061multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> { 2062 foreach vtiToWti = AllWidenableFloatVectors in { 2063 defvar vti = vtiToWti.Vti; 2064 defvar wti = vtiToWti.Wti; 2065 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2066 GetVTypePredicates<wti>.Predicates) in { 2067 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 2068 (vti.Vector vti.RegClass:$rs2), 2069 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 2070 VLOpFrag), 2071 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK") 2072 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 2073 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2074 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 2075 (vti.Vector vti.RegClass:$rs2), 2076 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 2077 VLOpFrag), 2078 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK") 2079 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 2080 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2081 } 2082 } 2083} 2084 2085multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> { 2086 foreach vtiToWti = AllWidenableFloatVectors in { 2087 defvar vti = vtiToWti.Vti; 2088 defvar wti = vtiToWti.Wti; 2089 defvar suffix = vti.LMul.MX # "_E" # vti.SEW; 2090 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2091 GetVTypePredicates<wti>.Predicates) in { 2092 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 2093 (vti.Vector vti.RegClass:$rs2), 2094 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 2095 VLOpFrag), 2096 (!cast<Instruction>(instruction_name#"_VV_"#suffix#"_MASK") 2097 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 2098 (vti.Mask V0), 2099 // Value to indicate no rounding mode change in 2100 // RISCVInsertReadWriteCSR 2101 FRM_DYN, 2102 GPR:$vl, vti.Log2SEW, TA_MA)>; 2103 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 2104 (vti.Vector vti.RegClass:$rs2), 2105 (wti.Vector wti.RegClass:$rd), (vti.Mask V0), 2106 VLOpFrag), 2107 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix#"_MASK") 2108 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 2109 (vti.Mask V0), 2110 // Value to indicate no rounding mode change in 2111 // RISCVInsertReadWriteCSR 2112 FRM_DYN, 2113 GPR:$vl, vti.Log2SEW, TA_MA)>; 2114 } 2115 } 2116} 2117 2118multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> { 2119 foreach vti = AllVectors in { 2120 defvar ivti = GetIntVTypeInfo<vti>.Vti; 2121 let Predicates = GetVTypePredicates<ivti>.Predicates in { 2122 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd), 2123 (vti.Vector vti.RegClass:$rs1), 2124 uimm5:$rs2, (vti.Mask V0), 2125 VLOpFrag, (XLenVT timm:$policy))), 2126 (!cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK") 2127 vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2, 2128 (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2129 (XLenVT timm:$policy))>; 2130 2131 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd), 2132 (vti.Vector vti.RegClass:$rs1), 2133 GPR:$rs2, (vti.Mask V0), 2134 VLOpFrag, (XLenVT timm:$policy))), 2135 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK") 2136 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, 2137 (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2138 (XLenVT timm:$policy))>; 2139 } 2140 } 2141} 2142 2143multiclass VPatSlide1VL_VX<SDNode vop, string instruction_name> { 2144 foreach vti = AllIntegerVectors in { 2145 let Predicates = GetVTypePredicates<vti>.Predicates in { 2146 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3), 2147 (vti.Vector vti.RegClass:$rs1), 2148 GPR:$rs2, (vti.Mask V0), VLOpFrag)), 2149 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK") 2150 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, 2151 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 2152 } 2153 } 2154} 2155 2156multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> { 2157 foreach vti = AllFloatVectors in { 2158 let Predicates = GetVTypePredicates<vti>.Predicates in { 2159 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3), 2160 (vti.Vector vti.RegClass:$rs1), 2161 vti.Scalar:$rs2, (vti.Mask V0), VLOpFrag)), 2162 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK") 2163 vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2, 2164 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>; 2165 } 2166 } 2167} 2168 2169multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> { 2170 foreach vti = AllIntegerVectors in { 2171 let Predicates = GetVTypePredicates<vti>.Predicates in { 2172 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 2173 (vti.Vector vti.RegClass:$rs2), 2174 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2175 (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX#"_MASK") 2176 vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2, 2177 (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2178 def : Pat<(vop (vti.Vector vti.RegClass:$rs1), 2179 (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 2180 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2181 (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX#"_MASK") 2182 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2, 2183 (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2184 } 2185 } 2186} 2187 2188//===----------------------------------------------------------------------===// 2189// Patterns. 2190//===----------------------------------------------------------------------===// 2191 2192// 11. Vector Integer Arithmetic Instructions 2193 2194// 11.1. Vector Single-Width Integer Add and Subtract 2195defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">; 2196defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">; 2197// Handle VRSUB specially since it's the only integer binary op with reversed 2198// pattern operands 2199foreach vti = AllIntegerVectors in { 2200 let Predicates = GetVTypePredicates<vti>.Predicates in { 2201 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 2202 (vti.Vector vti.RegClass:$rs1), 2203 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2204 (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK") 2205 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2, 2206 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2207 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)), 2208 (vti.Vector vti.RegClass:$rs1), 2209 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2210 (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK") 2211 vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2, 2212 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2213 } 2214} 2215 2216// 11.2. Vector Widening Integer Add/Subtract 2217defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">; 2218defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">; 2219defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">; 2220defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">; 2221 2222// shl_vl (ext_vl v, splat 1) is a special case of widening add. 2223foreach vtiToWti = AllWidenableIntVectors in { 2224 defvar vti = vtiToWti.Vti; 2225 defvar wti = vtiToWti.Wti; 2226 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2227 GetVTypePredicates<wti>.Predicates) in { 2228 def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse 2229 (vti.Vector vti.RegClass:$rs1), 2230 (vti.Mask V0), VLOpFrag)), 2231 (wti.Vector (riscv_vmv_v_x_vl 2232 (wti.Vector undef), 1, VLOpFrag)), 2233 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2234 (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK") 2235 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1, 2236 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2237 def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse 2238 (vti.Vector vti.RegClass:$rs1), 2239 (vti.Mask V0), VLOpFrag)), 2240 (wti.Vector (riscv_vmv_v_x_vl 2241 (wti.Vector undef), 1, VLOpFrag)), 2242 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag), 2243 (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK") 2244 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1, 2245 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2246 } 2247} 2248 2249// 11.3. Vector Integer Extension 2250defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2", 2251 AllFractionableVF2IntVectors>; 2252defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2", 2253 AllFractionableVF2IntVectors>; 2254defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4", 2255 AllFractionableVF4IntVectors>; 2256defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4", 2257 AllFractionableVF4IntVectors>; 2258defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8", 2259 AllFractionableVF8IntVectors>; 2260defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8", 2261 AllFractionableVF8IntVectors>; 2262 2263// 11.5. Vector Bitwise Logical Instructions 2264defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">; 2265defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">; 2266defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">; 2267 2268// 11.6. Vector Single-Width Bit Shift Instructions 2269defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>; 2270defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>; 2271defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>; 2272 2273foreach vti = AllIntegerVectors in { 2274 // Emit shift by 1 as an add since it might be faster. 2275 let Predicates = GetVTypePredicates<vti>.Predicates in 2276 def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1), 2277 (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), 2278 srcvalue, (vti.Mask true_mask), VLOpFrag), 2279 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 2280 (vti.Vector (IMPLICIT_DEF)), 2281 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 2282} 2283 2284// 11.7. Vector Narrowing Integer Right Shift Instructions 2285defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">; 2286defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">; 2287 2288defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">; 2289defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">; 2290defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">; 2291defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">; 2292defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">; 2293defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">; 2294 2295defm : VPatNarrowShiftVL_WV<riscv_srl_vl, "PseudoVNSRL">; 2296defm : VPatNarrowShiftVL_WV<riscv_sra_vl, "PseudoVNSRA">; 2297 2298defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">; 2299 2300foreach vtiTowti = AllWidenableIntVectors in { 2301 defvar vti = vtiTowti.Vti; 2302 defvar wti = vtiTowti.Wti; 2303 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2304 GetVTypePredicates<wti>.Predicates) in 2305 def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1), 2306 (vti.Mask V0), 2307 VLOpFrag)), 2308 (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK") 2309 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, 2310 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2311} 2312 2313// 11.8. Vector Integer Comparison Instructions 2314foreach vti = AllIntegerVectors in { 2315 let Predicates = GetVTypePredicates<vti>.Predicates in { 2316 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>; 2317 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>; 2318 2319 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 2320 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 2321 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2322 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2323 2324 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 2325 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 2326 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>; 2327 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>; 2328 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2329 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2330 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 2331 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 2332 // There is no VMSGE(U)_VX instruction 2333 2334 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>; 2335 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>; 2336 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>; 2337 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>; 2338 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>; 2339 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>; 2340 2341 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT, 2342 SplatPat_simm5_plus1>; 2343 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT, 2344 SplatPat_simm5_plus1_nonzero>; 2345 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE, 2346 SplatPat_simm5_plus1>; 2347 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE, 2348 SplatPat_simm5_plus1_nonzero>; 2349 } 2350} // foreach vti = AllIntegerVectors 2351 2352// 11.9. Vector Integer Min/Max Instructions 2353defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">; 2354defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">; 2355defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">; 2356defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">; 2357 2358// 11.10. Vector Single-Width Integer Multiply Instructions 2359defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">; 2360defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", IntegerVectorsExceptI64>; 2361defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", IntegerVectorsExceptI64>; 2362// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*. 2363let Predicates = [HasVInstructionsFullMultiply] in { 2364 defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", I64IntegerVectors>; 2365 defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", I64IntegerVectors>; 2366} 2367 2368// 11.11. Vector Integer Divide Instructions 2369defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU", isSEWAware=1>; 2370defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV", isSEWAware=1>; 2371defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>; 2372defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>; 2373 2374// 11.12. Vector Widening Integer Multiply Instructions 2375defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">; 2376defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">; 2377defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">; 2378 2379// 11.13 Vector Single-Width Integer Multiply-Add Instructions 2380defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">; 2381defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">; 2382defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">; 2383defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">; 2384 2385// 11.14. Vector Widening Integer Multiply-Add Instructions 2386defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmacc_vl, "PseudoVWMACC">; 2387defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccu_vl, "PseudoVWMACCU">; 2388defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccsu_vl, "PseudoVWMACCSU">; 2389foreach vtiTowti = AllWidenableIntVectors in { 2390 defvar vti = vtiTowti.Vti; 2391 defvar wti = vtiTowti.Wti; 2392 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 2393 GetVTypePredicates<wti>.Predicates) in 2394 def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1), 2395 (SplatPat XLenVT:$rs2), 2396 (wti.Vector wti.RegClass:$rd), 2397 (vti.Mask V0), VLOpFrag), 2398 (!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK") 2399 wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1, 2400 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2401} 2402 2403// 11.15. Vector Integer Merge Instructions 2404foreach vti = AllIntegerVectors in { 2405 let Predicates = GetVTypePredicates<vti>.Predicates in { 2406 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0), 2407 vti.RegClass:$rs1, 2408 vti.RegClass:$rs2, 2409 vti.RegClass:$merge, 2410 VLOpFrag)), 2411 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX) 2412 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 2413 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2414 2415 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0), 2416 (SplatPat XLenVT:$rs1), 2417 vti.RegClass:$rs2, 2418 vti.RegClass:$merge, 2419 VLOpFrag)), 2420 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX) 2421 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 2422 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2423 2424 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0), 2425 (SplatPat_simm5 simm5:$rs1), 2426 vti.RegClass:$rs2, 2427 vti.RegClass:$merge, 2428 VLOpFrag)), 2429 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX) 2430 vti.RegClass:$merge, vti.RegClass:$rs2, simm5:$rs1, 2431 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 2432 } 2433} 2434 2435// 11.16. Vector Integer Move Instructions 2436foreach vti = AllVectors in { 2437 let Predicates = GetVTypePredicates<vti>.Predicates in { 2438 def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru, 2439 vti.RegClass:$rs2, VLOpFrag)), 2440 (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX) 2441 vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2442} 2443 2444foreach vti = AllIntegerVectors in { 2445 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)), 2446 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX) 2447 vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2448 defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5"); 2449 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5), 2450 VLOpFrag)), 2451 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX) 2452 vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>; 2453 } 2454} 2455 2456// 12. Vector Fixed-Point Arithmetic Instructions 2457 2458// 12.1. Vector Single-Width Saturating Add and Subtract 2459defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">; 2460defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">; 2461defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">; 2462defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">; 2463 2464// 12.2. Vector Single-Width Averaging Add and Subtract 2465defm : VPatAVGADDVL_VV_VX_RM<riscv_avgfloors_vl, 0b10>; 2466defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10, suffix="U">; 2467defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceils_vl, 0b00>; 2468defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00, suffix="U">; 2469 2470// 12.5. Vector Narrowing Fixed-Point Clip Instructions 2471defm : VPatBinaryRM_NVL_WV_WX_WI<riscv_vnclip_vl, "PseudoVNCLIP">; 2472defm : VPatBinaryRM_NVL_WV_WX_WI<riscv_vnclipu_vl, "PseudoVNCLIPU">; 2473 2474// 13. Vector Floating-Point Instructions 2475 2476// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 2477defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD", isSEWAware=1>; 2478defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fsub_vl, "PseudoVFSUB", isSEWAware=1>; 2479defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fsub_vl, "PseudoVFRSUB", isSEWAware=1>; 2480 2481// 13.3. Vector Widening Floating-Point Add/Subtract Instructions 2482defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwadd_vl, riscv_vfwadd_w_vl, 2483 "PseudoVFWADD", isSEWAware=1>; 2484defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwsub_vl, riscv_vfwsub_w_vl, 2485 "PseudoVFWSUB", isSEWAware=1>; 2486 2487// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 2488defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fmul_vl, "PseudoVFMUL", isSEWAware=1>; 2489defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fdiv_vl, "PseudoVFDIV", isSEWAware=1>; 2490defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fdiv_vl, "PseudoVFRDIV", isSEWAware=1>; 2491 2492// 13.5. Vector Widening Floating-Point Multiply Instructions 2493defm : VPatBinaryFPWVL_VV_VF_RM<riscv_vfwmul_vl, "PseudoVFWMUL", isSEWAware=1>; 2494 2495// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions. 2496defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmadd_vl, "PseudoVFMADD">; 2497defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmsub_vl, "PseudoVFMSUB">; 2498defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmadd_vl, "PseudoVFNMADD">; 2499defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmsub_vl, "PseudoVFNMSUB">; 2500defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">; 2501defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">; 2502defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">; 2503defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">; 2504 2505// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 2506defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACC">; 2507defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmadd_vl, "PseudoVFWNMACC">; 2508defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmsub_vl, "PseudoVFWMSAC">; 2509defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmsub_vl, "PseudoVFWNMSAC">; 2510 2511// 13.11. Vector Floating-Point MIN/MAX Instructions 2512defm : VPatBinaryFPVL_VV_VF<riscv_vfmin_vl, "PseudoVFMIN", isSEWAware=1>; 2513defm : VPatBinaryFPVL_VV_VF<riscv_vfmax_vl, "PseudoVFMAX", isSEWAware=1>; 2514 2515// 13.13. Vector Floating-Point Compare Instructions 2516defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETEQ, 2517 "PseudoVMFEQ", "PseudoVMFEQ">; 2518defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETOEQ, 2519 "PseudoVMFEQ", "PseudoVMFEQ">; 2520defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETNE, 2521 "PseudoVMFNE", "PseudoVMFNE">; 2522defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETUNE, 2523 "PseudoVMFNE", "PseudoVMFNE">; 2524defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLT, 2525 "PseudoVMFLT", "PseudoVMFGT">; 2526defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLT, 2527 "PseudoVMFLT", "PseudoVMFGT">; 2528defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLE, 2529 "PseudoVMFLE", "PseudoVMFGE">; 2530defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE, 2531 "PseudoVMFLE", "PseudoVMFGE">; 2532 2533foreach vti = AllFloatVectors in { 2534 let Predicates = GetVTypePredicates<vti>.Predicates in { 2535 // 13.8. Vector Floating-Point Square-Root Instruction 2536 def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0), 2537 VLOpFrag), 2538 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK") 2539 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 2540 (vti.Mask V0), 2541 // Value to indicate no rounding mode change in 2542 // RISCVInsertReadWriteCSR 2543 FRM_DYN, 2544 GPR:$vl, vti.Log2SEW, TA_MA)>; 2545 2546 // 13.12. Vector Floating-Point Sign-Injection Instructions 2547 def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), 2548 VLOpFrag), 2549 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_E"#vti.SEW#"_MASK") 2550 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, 2551 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2552 TA_MA)>; 2553 // Handle fneg with VFSGNJN using the same input for both operands. 2554 def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), 2555 VLOpFrag), 2556 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW #"_MASK") 2557 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, 2558 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2559 TA_MA)>; 2560 2561 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 2562 (vti.Vector vti.RegClass:$rs2), 2563 vti.RegClass:$merge, 2564 (vti.Mask V0), 2565 VLOpFrag), 2566 (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK") 2567 vti.RegClass:$merge, vti.RegClass:$rs1, 2568 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2569 TAIL_AGNOSTIC)>; 2570 2571 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 2572 (riscv_fneg_vl vti.RegClass:$rs2, 2573 (vti.Mask true_mask), 2574 VLOpFrag), 2575 srcvalue, 2576 (vti.Mask true_mask), 2577 VLOpFrag), 2578 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW) 2579 (vti.Vector (IMPLICIT_DEF)), 2580 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; 2581 2582 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), 2583 (SplatFPOp vti.ScalarRegClass:$rs2), 2584 vti.RegClass:$merge, 2585 (vti.Mask V0), 2586 VLOpFrag), 2587 (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK") 2588 vti.RegClass:$merge, vti.RegClass:$rs1, 2589 vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, 2590 TAIL_AGNOSTIC)>; 2591 2592 // Rounding without exception to implement nearbyint. 2593 def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1), 2594 (vti.Mask V0), VLOpFrag), 2595 (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK") 2596 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, 2597 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2598 2599 // 14.14. Vector Floating-Point Classify Instruction 2600 def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2), 2601 (vti.Mask V0), VLOpFrag), 2602 (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK") 2603 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 2604 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 2605 } 2606} 2607 2608foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in { 2609 // Floating-point vselects: 2610 // 11.15. Vector Integer Merge Instructions 2611 // 13.15. Vector Floating-Point Merge Instruction 2612 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 2613 let Predicates = GetVTypePredicates<ivti>.Predicates in { 2614 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2615 fvti.RegClass:$rs1, 2616 fvti.RegClass:$rs2, 2617 fvti.RegClass:$merge, 2618 VLOpFrag)), 2619 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 2620 fvti.RegClass:$merge, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0), 2621 GPR:$vl, fvti.Log2SEW)>; 2622 2623 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2624 (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))), 2625 fvti.RegClass:$rs2, 2626 fvti.RegClass:$merge, 2627 VLOpFrag)), 2628 (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX) 2629 fvti.RegClass:$merge, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0), 2630 GPR:$vl, fvti.Log2SEW)>; 2631 2632 2633 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2634 (SplatFPOp (fvti.Scalar fpimm0)), 2635 fvti.RegClass:$rs2, 2636 fvti.RegClass:$merge, 2637 VLOpFrag)), 2638 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 2639 fvti.RegClass:$merge, fvti.RegClass:$rs2, 0, (fvti.Mask V0), 2640 GPR:$vl, fvti.Log2SEW)>; 2641 } 2642} 2643 2644foreach fvti = AllFloatVectors in { 2645 let Predicates = GetVTypePredicates<fvti>.Predicates in { 2646 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0), 2647 (SplatFPOp fvti.ScalarRegClass:$rs1), 2648 fvti.RegClass:$rs2, 2649 fvti.RegClass:$merge, 2650 VLOpFrag)), 2651 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 2652 fvti.RegClass:$merge, fvti.RegClass:$rs2, 2653 (fvti.Scalar fvti.ScalarRegClass:$rs1), 2654 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 2655 } 2656} 2657 2658foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in { 2659 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 2660 let Predicates = GetVTypePredicates<ivti>.Predicates in { 2661 // 13.16. Vector Floating-Point Move Instruction 2662 // If we're splatting fpimm0, use vmv.v.x vd, x0. 2663 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2664 fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)), 2665 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX) 2666 $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>; 2667 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2668 fvti.Vector:$passthru, (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)), 2669 (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX) 2670 $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>; 2671 } 2672} 2673 2674foreach fvti = AllFloatVectors in { 2675 let Predicates = GetVTypePredicates<fvti>.Predicates in { 2676 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl 2677 fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), 2678 (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" # 2679 fvti.LMul.MX) 2680 $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), 2681 GPR:$vl, fvti.Log2SEW, TU_MU)>; 2682 } 2683} 2684 2685// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions 2686defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">; 2687defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">; 2688defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">; 2689defm : VPatConvertFP2I_RM_VL_V<any_riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">; 2690 2691defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">; 2692defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">; 2693 2694defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">; 2695defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">; 2696 2697defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">; 2698defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">; 2699 2700// 13.18. Widening Floating-Point/Integer Type-Convert Instructions 2701defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">; 2702defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">; 2703defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">; 2704defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">; 2705 2706defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">; 2707defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">; 2708 2709defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">; 2710defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">; 2711 2712foreach fvtiToFWti = AllWidenableFloatVectors in { 2713 defvar fvti = fvtiToFWti.Vti; 2714 defvar fwti = fvtiToFWti.Wti; 2715 let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal], 2716 !listconcat(GetVTypePredicates<fvti>.Predicates, 2717 GetVTypePredicates<fwti>.Predicates)) in 2718 def : Pat<(fwti.Vector (any_riscv_fpextend_vl 2719 (fvti.Vector fvti.RegClass:$rs1), 2720 (fvti.Mask V0), 2721 VLOpFrag)), 2722 (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 2723 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 2724 (fvti.Mask V0), 2725 GPR:$vl, fvti.Log2SEW, TA_MA)>; 2726} 2727 2728foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in { 2729 defvar fvti = fvtiToFWti.Vti; 2730 defvar fwti = fvtiToFWti.Wti; 2731 let Predicates = [HasVInstructionsBF16] in 2732 def : Pat<(fwti.Vector (any_riscv_fpextend_vl 2733 (fvti.Vector fvti.RegClass:$rs1), 2734 (fvti.Mask V0), 2735 VLOpFrag)), 2736 (!cast<Instruction>("PseudoVFWCVTBF16_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 2737 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 2738 (fvti.Mask V0), 2739 GPR:$vl, fvti.Log2SEW, TA_MA)>; 2740} 2741 2742// 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions 2743defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">; 2744defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">; 2745defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">; 2746defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">; 2747 2748defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">; 2749defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">; 2750 2751defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">; 2752defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">; 2753 2754defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">; 2755defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">; 2756 2757foreach fvtiToFWti = AllWidenableFloatVectors in { 2758 defvar fvti = fvtiToFWti.Vti; 2759 defvar fwti = fvtiToFWti.Wti; 2760 // Define vfwcvt.f.f.v for f16 when Zvfhmin is enable. 2761 let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal], 2762 !listconcat(GetVTypePredicates<fvti>.Predicates, 2763 GetVTypePredicates<fwti>.Predicates)) in { 2764 def : Pat<(fvti.Vector (any_riscv_fpround_vl 2765 (fwti.Vector fwti.RegClass:$rs1), 2766 (fwti.Mask V0), VLOpFrag)), 2767 (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 2768 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 2769 (fwti.Mask V0), 2770 // Value to indicate no rounding mode change in 2771 // RISCVInsertReadWriteCSR 2772 FRM_DYN, 2773 GPR:$vl, fvti.Log2SEW, TA_MA)>; 2774 2775 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 2776 GetVTypePredicates<fwti>.Predicates) in 2777 def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl 2778 (fwti.Vector fwti.RegClass:$rs1), 2779 (fwti.Mask V0), VLOpFrag)), 2780 (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 2781 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 2782 (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 2783 } 2784} 2785 2786foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in { 2787 defvar fvti = fvtiToFWti.Vti; 2788 defvar fwti = fvtiToFWti.Wti; 2789 let Predicates = [HasVInstructionsBF16] in 2790 def : Pat<(fvti.Vector (any_riscv_fpround_vl 2791 (fwti.Vector fwti.RegClass:$rs1), 2792 (fwti.Mask V0), VLOpFrag)), 2793 (!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 2794 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 2795 (fwti.Mask V0), 2796 // Value to indicate no rounding mode change in 2797 // RISCVInsertReadWriteCSR 2798 FRM_DYN, 2799 GPR:$vl, fvti.Log2SEW, TA_MA)>; 2800} 2801 2802// 14. Vector Reduction Operations 2803 2804// 14.1. Vector Single-Width Integer Reduction Instructions 2805defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", is_float=0>; 2806defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", is_float=0>; 2807defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", is_float=0>; 2808defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", is_float=0>; 2809defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", is_float=0>; 2810defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", is_float=0>; 2811defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", is_float=0>; 2812defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", is_float=0>; 2813 2814// 14.2. Vector Widening Integer Reduction Instructions 2815defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", is_float=0>; 2816defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", is_float=0>; 2817defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", is_float=0>; 2818defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", is_float=0>; 2819defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", is_float=0>; 2820 2821// 14.3. Vector Single-Width Floating-Point Reduction Instructions 2822defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", is_float=1>; 2823defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", is_float=1>; 2824defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", is_float=1>; 2825defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", is_float=1>; 2826 2827// 14.4. Vector Widening Floating-Point Reduction Instructions 2828defm : VPatWidenReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse, 2829 "PseudoVFWREDOSUM", is_float=1>; 2830defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl, 2831 riscv_fpextend_vl_oneuse, 2832 "PseudoVFWREDOSUM", is_float=1>; 2833defm : VPatWidenReductionVL_RM<rvv_vecreduce_FADD_vl, fpext_oneuse, 2834 "PseudoVFWREDUSUM", is_float=1>; 2835defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl, 2836 riscv_fpextend_vl_oneuse, 2837 "PseudoVFWREDUSUM", is_float=1>; 2838 2839// 15. Vector Mask Instructions 2840 2841foreach mti = AllMasks in { 2842 let Predicates = [HasVInstructions] in { 2843 // 15.1 Vector Mask-Register Logical Instructions 2844 def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)), 2845 (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; 2846 def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)), 2847 (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; 2848 2849 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)), 2850 (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX) 2851 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2852 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)), 2853 (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX) 2854 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2855 def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)), 2856 (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX) 2857 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2858 2859 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, 2860 (riscv_vmnot_vl VR:$rs2, VLOpFrag), 2861 VLOpFrag)), 2862 (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX) 2863 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2864 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, 2865 (riscv_vmnot_vl VR:$rs2, VLOpFrag), 2866 VLOpFrag)), 2867 (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX) 2868 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2869 // XOR is associative so we need 2 patterns for VMXNOR. 2870 def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1, 2871 VLOpFrag), 2872 VR:$rs2, VLOpFrag)), 2873 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX) 2874 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2875 2876 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2, 2877 VLOpFrag), 2878 VLOpFrag)), 2879 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX) 2880 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2881 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2, 2882 VLOpFrag), 2883 VLOpFrag)), 2884 (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX) 2885 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2886 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2, 2887 VLOpFrag), 2888 VLOpFrag)), 2889 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX) 2890 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2891 2892 // Match the not idiom to the vmnot.m pseudo. 2893 def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)), 2894 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX) 2895 VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>; 2896 2897 // 15.2 Vector count population in mask vcpop.m 2898 def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask), 2899 VLOpFrag)), 2900 (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX) 2901 VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2902 def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0), 2903 VLOpFrag)), 2904 (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK") 2905 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; 2906 2907 // 15.3 vfirst find-first-set mask bit 2908 def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask), 2909 VLOpFrag)), 2910 (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX) 2911 VR:$rs2, GPR:$vl, mti.Log2SEW)>; 2912 def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0), 2913 VLOpFrag)), 2914 (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK") 2915 VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; 2916 } 2917} 2918 2919// 16. Vector Permutation Instructions 2920 2921// 16.1. Integer Scalar Move Instructions 2922foreach vti = NoGroupIntegerVectors in { 2923 let Predicates = GetVTypePredicates<vti>.Predicates in { 2924 def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge), 2925 vti.ScalarRegClass:$rs1, 2926 VLOpFrag)), 2927 (PseudoVMV_S_X $merge, vti.ScalarRegClass:$rs1, GPR:$vl, 2928 vti.Log2SEW)>; 2929 } 2930} 2931 2932// 16.4. Vector Register Gather Instruction 2933foreach vti = AllIntegerVectors in { 2934 let Predicates = GetVTypePredicates<vti>.Predicates in { 2935 def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2, 2936 vti.RegClass:$rs1, 2937 vti.RegClass:$merge, 2938 (vti.Mask V0), 2939 VLOpFrag)), 2940 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") 2941 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 2942 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2943 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, 2944 vti.RegClass:$merge, 2945 (vti.Mask V0), 2946 VLOpFrag)), 2947 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") 2948 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 2949 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2950 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, 2951 uimm5:$imm, 2952 vti.RegClass:$merge, 2953 (vti.Mask V0), 2954 VLOpFrag)), 2955 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") 2956 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, 2957 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2958 } 2959 2960 // emul = lmul * 16 / sew 2961 defvar vlmul = vti.LMul; 2962 defvar octuple_lmul = vlmul.octuple; 2963 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); 2964 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 2965 defvar emul_str = octuple_to_str<octuple_emul>.ret; 2966 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str); 2967 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str; 2968 let Predicates = GetVTypePredicates<vti>.Predicates in 2969 def : Pat<(vti.Vector 2970 (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, 2971 (ivti.Vector ivti.RegClass:$rs1), 2972 vti.RegClass:$merge, 2973 (vti.Mask V0), 2974 VLOpFrag)), 2975 (!cast<Instruction>(inst#"_MASK") 2976 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, 2977 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 2978 } 2979} 2980 2981// 16.2. Floating-Point Scalar Move Instructions 2982foreach vti = NoGroupFloatVectors in { 2983 let Predicates = GetVTypePredicates<vti>.Predicates in { 2984 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 2985 (vti.Scalar (fpimm0)), 2986 VLOpFrag)), 2987 (PseudoVMV_S_X $merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>; 2988 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 2989 (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))), 2990 VLOpFrag)), 2991 (PseudoVMV_S_X $merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>; 2992 } 2993} 2994 2995foreach vti = AllFloatVectors in { 2996 let Predicates = GetVTypePredicates<vti>.Predicates in { 2997 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge), 2998 vti.ScalarRegClass:$rs1, 2999 VLOpFrag)), 3000 (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX) 3001 vti.RegClass:$merge, 3002 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; 3003 } 3004 defvar ivti = GetIntVTypeInfo<vti>.Vti; 3005 let Predicates = GetVTypePredicates<ivti>.Predicates in { 3006 def : Pat<(vti.Vector 3007 (riscv_vrgather_vv_vl vti.RegClass:$rs2, 3008 (ivti.Vector vti.RegClass:$rs1), 3009 vti.RegClass:$merge, 3010 (vti.Mask V0), 3011 VLOpFrag)), 3012 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") 3013 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, 3014 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 3015 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, 3016 vti.RegClass:$merge, 3017 (vti.Mask V0), 3018 VLOpFrag)), 3019 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") 3020 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1, 3021 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 3022 def : Pat<(vti.Vector 3023 (riscv_vrgather_vx_vl vti.RegClass:$rs2, 3024 uimm5:$imm, 3025 vti.RegClass:$merge, 3026 (vti.Mask V0), 3027 VLOpFrag)), 3028 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") 3029 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, 3030 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 3031 } 3032 3033 defvar vlmul = vti.LMul; 3034 defvar octuple_lmul = vlmul.octuple; 3035 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); 3036 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 3037 defvar emul_str = octuple_to_str<octuple_emul>.ret; 3038 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str); 3039 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str; 3040 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 3041 GetVTypePredicates<ivti>.Predicates) in 3042 def : Pat<(vti.Vector 3043 (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, 3044 (ivti.Vector ivti.RegClass:$rs1), 3045 vti.RegClass:$merge, 3046 (vti.Mask V0), 3047 VLOpFrag)), 3048 (!cast<Instruction>(inst#"_MASK") 3049 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, 3050 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; 3051 } 3052} 3053 3054//===----------------------------------------------------------------------===// 3055// Miscellaneous RISCVISD SDNodes 3056//===----------------------------------------------------------------------===// 3057 3058def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2, 3059 [SDTCisVec<0>, SDTCVecEltisVT<1, i1>, 3060 SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>; 3061 3062def SDTRVVSlide : SDTypeProfile<1, 6, [ 3063 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>, 3064 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>, 3065 SDTCisVT<6, XLenVT> 3066]>; 3067def SDTRVVSlide1 : SDTypeProfile<1, 5, [ 3068 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>, 3069 SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, 3070 SDTCisVT<5, XLenVT> 3071]>; 3072def SDTRVVFSlide1 : SDTypeProfile<1, 5, [ 3073 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>, 3074 SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, 3075 SDTCisVT<5, XLenVT> 3076]>; 3077 3078def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>; 3079def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>; 3080def riscv_slidedown_vl : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>; 3081def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>; 3082def riscv_fslide1up_vl : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>; 3083def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>; 3084 3085foreach vti = AllIntegerVectors in { 3086 let Predicates = GetVTypePredicates<vti>.Predicates in { 3087 def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0), 3088 VLOpFrag)), 3089 (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK") 3090 (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 3091 TAIL_AGNOSTIC)>; 3092 } 3093} 3094 3095defm : VPatSlideVL_VX_VI<riscv_slideup_vl, "PseudoVSLIDEUP">; 3096defm : VPatSlideVL_VX_VI<riscv_slidedown_vl, "PseudoVSLIDEDOWN">; 3097defm : VPatSlide1VL_VX<riscv_slide1up_vl, "PseudoVSLIDE1UP">; 3098defm : VPatSlide1VL_VF<riscv_fslide1up_vl, "PseudoVFSLIDE1UP">; 3099defm : VPatSlide1VL_VX<riscv_slide1down_vl, "PseudoVSLIDE1DOWN">; 3100defm : VPatSlide1VL_VF<riscv_fslide1down_vl, "PseudoVFSLIDE1DOWN">; 3101