//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// This file contains the required infrastructure and SDNode patterns to /// support code generation for the standard 'V' (Vector) extension, version /// version 1.0. /// /// This file is included from and depends upon RISCVInstrInfoVPseudos.td /// /// Note: the patterns for RVV intrinsics are found in /// RISCVInstrInfoVPseudos.td. /// //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // Helpers to define the SDNode patterns. //===----------------------------------------------------------------------===// def rvv_vnot : PatFrag<(ops node:$in), (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>; multiclass VPatUSLoadStoreSDNode { defvar load_instr = !cast("PseudoVLE"#sew#"_V_"#vlmul.MX); defvar store_instr = !cast("PseudoVSE"#sew#"_V_"#vlmul.MX); // Load def : Pat<(type (load GPR:$rs1)), (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl, log2sew, TA_MA)>; // Store def : Pat<(store type:$rs2, GPR:$rs1), (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>; } multiclass VPatUSLoadStoreWholeVRSDNode { defvar load_instr = !cast("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V"); defvar store_instr = !cast("VS"#!substr(vlmul.MX, 1)#"R_V"); // Load def : Pat<(type (load GPR:$rs1)), (load_instr GPR:$rs1)>; // Store def : Pat<(store type:$rs2, GPR:$rs1), (store_instr reg_class:$rs2, GPR:$rs1)>; } multiclass VPatUSLoadStoreMaskSDNode { defvar load_instr = !cast("PseudoVLM_V_"#m.BX); defvar store_instr = !cast("PseudoVSM_V_"#m.BX); // Load def : Pat<(m.Mask (load GPR:$rs1)), (load_instr (m.Mask 
(IMPLICIT_DEF)), GPR:$rs1, m.AVL, m.Log2SEW, TA_MA)>; // Store def : Pat<(store m.Mask:$rs2, GPR:$rs1), (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>; } class VPatBinarySDNode_VV : Pat<(result_type (vop (op_type op_reg_class:$rs1), (op_type op_reg_class:$rs2))), (!cast( !if(isSEWAware, instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), instruction_name#"_VV_"# vlmul.MX)) (result_type (IMPLICIT_DEF)), op_reg_class:$rs1, op_reg_class:$rs2, avl, log2sew, TA_MA)>; class VPatBinarySDNode_VV_RM : Pat<(result_type (vop (op_type op_reg_class:$rs1), (op_type op_reg_class:$rs2))), (!cast( !if(isSEWAware, instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), instruction_name#"_VV_"# vlmul.MX)) (result_type (IMPLICIT_DEF)), op_reg_class:$rs1, op_reg_class:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, avl, log2sew, TA_MA)>; class VPatBinarySDNode_XI : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))), (!cast( !if(isSEWAware, instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew), instruction_name#_#suffix#_# vlmul.MX)) (result_type (IMPLICIT_DEF)), vop_reg_class:$rs1, xop_kind:$rs2, avl, log2sew, TA_MA)>; multiclass VPatBinarySDNode_VV_VX vtilist = AllIntegerVectors, bit isSEWAware = 0> { foreach vti = vtilist in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinarySDNode_VV; def : VPatBinarySDNode_XI; } } } multiclass VPatBinarySDNode_VV_VX_VI : VPatBinarySDNode_VV_VX { foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in def : VPatBinarySDNode_XI(SplatPat#_#ImmType), ImmType>; } } class VPatBinarySDNode_VF : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatFPOp xop_kind:$rs2)))), (!cast( !if(isSEWAware, instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), instruction_name#"_"#vlmul.MX)) (result_type (IMPLICIT_DEF)), vop_reg_class:$rs1, (xop_type xop_kind:$rs2), avl, log2sew, TA_MA)>; class 
VPatBinarySDNode_VF_RM : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatFPOp xop_kind:$rs2)))), (!cast( !if(isSEWAware, instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), instruction_name#"_"#vlmul.MX)) (result_type (IMPLICIT_DEF)), vop_reg_class:$rs1, (xop_type xop_kind:$rs2), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, avl, log2sew, TA_MA)>; multiclass VPatBinaryFPSDNode_VV_VF { foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinarySDNode_VV; def : VPatBinarySDNode_VF; } } } multiclass VPatBinaryFPSDNode_VV_VF_RM { foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinarySDNode_VV_RM; def : VPatBinarySDNode_VF_RM; } } } multiclass VPatBinaryFPSDNode_R_VF { foreach fvti = AllFloatVectors in let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), (!cast( !if(isSEWAware, instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) (fvti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, (fvti.Scalar fvti.ScalarRegClass:$rs2), fvti.AVL, fvti.Log2SEW, TA_MA)>; } multiclass VPatBinaryFPSDNode_R_VF_RM { foreach fvti = AllFloatVectors in let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), (!cast( !if(isSEWAware, instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) (fvti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, (fvti.Scalar fvti.ScalarRegClass:$rs2), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, fvti.AVL, fvti.Log2SEW, TA_MA)>; } multiclass VPatIntegerSetCCSDNode_VV { foreach vti = AllIntegerVectors in { defvar instruction = 
!cast(instruction_name#"_VV_"#vti.LMul.MX); let Predicates = GetVTypePredicates.Predicates in def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2), cc)), (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>; } } multiclass VPatIntegerSetCCSDNode_VV_Swappable : VPatIntegerSetCCSDNode_VV { foreach vti = AllIntegerVectors in { defvar instruction = !cast(instruction_name#"_VV_"#vti.LMul.MX); let Predicates = GetVTypePredicates.Predicates in def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), invcc)), (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>; } } multiclass VPatIntegerSetCCSDNode_XI< string instruction_name, CondCode cc, string kind, ComplexPattern SplatPatKind, DAGOperand xop_kind> { foreach vti = AllIntegerVectors in { defvar instruction = !cast(instruction_name#_#kind#_#vti.LMul.MX); let Predicates = GetVTypePredicates.Predicates in def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)), (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>; } } multiclass VPatIntegerSetCCSDNode_XI_Swappable : VPatIntegerSetCCSDNode_XI { foreach vti = AllIntegerVectors in { defvar instruction = !cast(instruction_name#_#kind#_#vti.LMul.MX); let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)), (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>; def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), (vti.Vector vti.RegClass:$rs1), invcc)), (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>; } } } multiclass VPatIntegerSetCCSDNode_VX_Swappable : VPatIntegerSetCCSDNode_XI_Swappable; multiclass VPatIntegerSetCCSDNode_VI : VPatIntegerSetCCSDNode_XI; multiclass VPatIntegerSetCCSDNode_VIPlus1 { foreach vti = 
AllIntegerVectors in { defvar instruction = !cast(instruction_name#"_VI_"#vti.LMul.MX); let Predicates = GetVTypePredicates.Predicates in def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), (vti.Vector (splatpat_kind simm5:$rs2)), cc)), (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2), vti.AVL, vti.Log2SEW)>; } } multiclass VPatFPSetCCSDNode_VV_VF_FV { foreach fvti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1), (fvti.Vector fvti.RegClass:$rs2), cc)), (!cast(inst_name#"_VV_"#fvti.LMul.MX) fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>; def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1), (SplatFPOp fvti.ScalarRegClass:$rs2), cc)), (!cast(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, fvti.AVL, fvti.Log2SEW)>; def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2), (fvti.Vector fvti.RegClass:$rs1), cc)), (!cast(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, fvti.AVL, fvti.Log2SEW)>; } } } multiclass VPatExtendSDNode_V ops, string inst_name, string suffix, list fraction_list> { foreach vtiTofti = fraction_list in { defvar vti = vtiTofti.Vti; defvar fti = vtiTofti.Fti; foreach op = ops in let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))), (!cast(inst_name#"_"#suffix#"_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TA_MA)>; } } multiclass VPatConvertI2FPSDNode_V_RM { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))), (!cast(instruction_name#"_"#fvti.LMul.MX) (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, // 
Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, fvti.AVL, fvti.Log2SEW, TA_MA)>; } } multiclass VPatConvertFP2ISDNode_V { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))), (!cast(instruction_name#"_"#ivti.LMul.MX) (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TA_MA)>; } } multiclass VPatWConvertI2FPSDNode_V { foreach vtiToWti = AllWidenableIntToFloatVectors in { defvar ivti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))), (!cast(instruction_name#"_"#ivti.LMul.MX) (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TA_MA)>; } } multiclass VPatWConvertFP2ISDNode_V { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))), (!cast(instruction_name#"_"#fvti.LMul.MX) (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TA_MA)>; } } multiclass VPatNConvertI2FPSDNode_W_RM { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))), (!cast(instruction_name#"_"#fvti.LMul.MX) (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, fvti.AVL, fvti.Log2SEW, TA_MA)>; } } multiclass VPatNConvertFP2ISDNode_W { foreach vtiToWti = 
AllWidenableIntToFloatVectors in { defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))), (!cast(instruction_name#"_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; } } multiclass VPatWidenBinarySDNode_VV_VX { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))), (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))), (!cast(instruction_name#"_VV_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))), (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))), (!cast(instruction_name#"_VX_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; } } } multiclass VPatWidenBinarySDNode_WV_WX { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))), (!cast(instruction_name#"_WV_"#vti.LMul.MX#"_TIED") wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))), (!cast(instruction_name#"_WX_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; } } } multiclass VPatWidenBinarySDNode_VV_VX_WV_WX : VPatWidenBinarySDNode_VV_VX, VPatWidenBinarySDNode_WV_WX; multiclass 
VPatWidenMulAddSDNode_VV { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat< (add (wti.Vector wti.RegClass:$rd), (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))), (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))), (!cast(instruction_name#"_VV_"#vti.LMul.MX) wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC )>; } } multiclass VPatWidenMulAddSDNode_VX { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat< (add (wti.Vector wti.RegClass:$rd), (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))), (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))), (!cast(instruction_name#"_VX_"#vti.LMul.MX) wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC )>; } } multiclass VPatWidenBinaryFPSDNode_VV_VF { foreach vtiToWti = AllWidenableFloatVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_VV_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), (vti.Mask true_mask), (XLenVT srcvalue)))), 
(!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; } } } multiclass VPatWidenBinaryFPSDNode_VV_VF_RM { foreach vtiToWti = AllWidenableFloatVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_VV_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.RegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector (SplatFPOp (vti.Scalar vti.ScalarRegClass:$rs1))), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), 
(!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TA_MA)>; } } } multiclass VPatWidenBinaryFPSDNode_WV_WF_RM { foreach vtiToWti = AllWidenableFloatVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_WV_"#vti.LMul.MX#"_TIED") wti.RegClass:$rs2, vti.RegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), (!cast(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TA_MA)>; } } } multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM : VPatWidenBinaryFPSDNode_VV_VF_RM, VPatWidenBinaryFPSDNode_WV_WF_RM; multiclass VPatWidenFPMulAccSDNode_VV_VF_RM { foreach vtiToWti = AllWidenableFloatVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = 
!listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector wti.RegClass:$rd)), (!cast(instruction_name#"_VV_"#vti.LMul.MX) wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector wti.RegClass:$rd)), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM { foreach vtiToWti = AllWidenableFloatVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue)))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), (fneg wti.RegClass:$rd)), (!cast(instruction_name#"_VV_"#vti.LMul.MX) wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))), (fneg (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)))), 
(fneg wti.RegClass:$rd)), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), (fneg wti.RegClass:$rd)), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } multiclass VPatWidenFPMulSacSDNode_VV_VF_RM { foreach vtiToWti = AllWidenableFloatVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), (fneg wti.RegClass:$rd)), (!cast(instruction_name#"_VV_"#vti.LMul.MX) wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), (fneg wti.RegClass:$rd)), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM { 
foreach vtiToWti = AllWidenableFloatVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue)))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), wti.RegClass:$rd), (!cast(instruction_name#"_VV_"#vti.LMul.MX) wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), (fneg (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)))), wti.RegClass:$rd), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), wti.RegClass:$rd), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } multiclass VPatMultiplyAddSDNode_VV_VX { foreach vti = AllIntegerVectors in { defvar suffix = vti.LMul.MX; let Predicates = GetVTypePredicates.Predicates in { // NOTE: We choose VMADD because it has the most commuting freedom. So it // works best with how TwoAddressInstructionPass tries commuting. 
def : Pat<(vti.Vector (op vti.RegClass:$rs2, (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))), (!cast(instruction_name#"_VV_"# suffix) vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally // commutable. def : Pat<(vti.Vector (op vti.RegClass:$rs2, (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))), (!cast(instruction_name#"_VX_" # suffix) vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } multiclass VPatAVGADD_VV_VX_RM { foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vop (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2)), (!cast("PseudoVAADDU_VV_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs2, vxrm, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(vop (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatPat (XLenVT GPR:$rs2)))), (!cast("PseudoVAADDU_VX_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2, vxrm, vti.AVL, vti.Log2SEW, TA_MA)>; } } } //===----------------------------------------------------------------------===// // Patterns. //===----------------------------------------------------------------------===// // 7.4. 
Vector Unit-Stride Instructions foreach vti = !listconcat(FractionalGroupIntegerVectors, FractionalGroupFloatVectors, FractionalGroupBFloatVectors) in let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal], GetVTypePredicates.Predicates) in defm : VPatUSLoadStoreSDNode; foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VBF16M1, VF16M1, VF32M1, VF64M1] in let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal], GetVTypePredicates.Predicates) in defm : VPatUSLoadStoreWholeVRSDNode; foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors, GroupBFloatVectors) in let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal], GetVTypePredicates.Predicates) in defm : VPatUSLoadStoreWholeVRSDNode; foreach mti = AllMasks in let Predicates = [HasVInstructions] in defm : VPatUSLoadStoreMaskSDNode; // 11. Vector Integer Arithmetic Instructions // 11.1. Vector Single-Width Integer Add and Subtract defm : VPatBinarySDNode_VV_VX_VI; defm : VPatBinarySDNode_VV_VX; // Handle VRSUB specially since it's the only integer binary op with reversed // pattern operands foreach vti = AllIntegerVectors in { // FIXME: The AddedComplexity here is covering up a missing matcher for // widening vwsub.vx which can recognize a extended folded into the // scalar of the splat. let AddedComplexity = 20 in let Predicates = GetVTypePredicates.Predicates in { def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))), (vti.Vector vti.RegClass:$rs1)), (!cast("PseudoVRSUB_VX_"# vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)), (vti.Vector vti.RegClass:$rs1)), (!cast("PseudoVRSUB_VI_"# vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; } } // 11.2. 
Vector Widening Integer Add and Subtract defm : VPatWidenBinarySDNode_VV_VX_WV_WX; defm : VPatWidenBinarySDNode_VV_VX_WV_WX; defm : VPatWidenBinarySDNode_VV_VX_WV_WX; defm : VPatWidenBinarySDNode_VV_VX_WV_WX; defm : VPatWidenBinarySDNode_VV_VX_WV_WX; defm : VPatWidenBinarySDNode_VV_VX_WV_WX; // shl (ext v, splat 1) is a special case of widening add. foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(shl (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVWADD_VV_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVWADDU_VV_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVWADDU_VV_"#vti.LMul.MX) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; } } // 11.3. 
Vector Integer Extension defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2", AllFractionableVF2IntVectors>; defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2", AllFractionableVF2IntVectors>; defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4", AllFractionableVF4IntVectors>; defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4", AllFractionableVF4IntVectors>; defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8", AllFractionableVF8IntVectors>; defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8", AllFractionableVF8IntVectors>; // 11.5. Vector Bitwise Logical Instructions defm : VPatBinarySDNode_VV_VX_VI; defm : VPatBinarySDNode_VV_VX_VI; defm : VPatBinarySDNode_VV_VX_VI; // 11.6. Vector Single-Width Bit Shift Instructions defm : VPatBinarySDNode_VV_VX_VI; defm : VPatBinarySDNode_VV_VX_VI; defm : VPatBinarySDNode_VV_VX_VI; foreach vti = AllIntegerVectors in { // Emit shift by 1 as an add since it might be faster. let Predicates = GetVTypePredicates.Predicates in def : Pat<(shl (vti.Vector vti.RegClass:$rs1), (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVADD_VV_"# vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>; } // 11.8. 
Vector Integer Comparison Instructions defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>; defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>; defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>; defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>; defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>; defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>; defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>; // There is no VMSGE(U)_VX instruction defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSEQ", SETEQ>; defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSNE", SETNE>; defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLE", SETLE>; defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLEU", SETULE>; defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGT", SETGT>; defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGTU", SETUGT>; defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLE", SETLT, SplatPat_simm5_plus1>; defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLEU", SETULT, SplatPat_simm5_plus1_nonzero>; defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGT", SETGE, SplatPat_simm5_plus1>; defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGTU", SETUGE, SplatPat_simm5_plus1_nonzero>; // 11.9. Vector Integer Min/Max Instructions defm : VPatBinarySDNode_VV_VX; defm : VPatBinarySDNode_VV_VX; defm : VPatBinarySDNode_VV_VX; defm : VPatBinarySDNode_VV_VX; // 11.10. 
// Vector Single-Width Integer Multiply Instructions
// NOTE(review): the <op, "PseudoV..."> template arguments of the
// VPatBinarySDNode_VV_VX / VPatWidenBinarySDNode_VV_VX /
// VPatMultiplyAddSDNode_VV_VX / VPatWidenMulAddSDNode_* defms in this section
// were stripped in transit — TODO restore from upstream.
defm : VPatBinarySDNode_VV_VX;
defm : VPatBinarySDNode_VV_VX;
defm : VPatBinarySDNode_VV_VX;
// mulhs/mulhu of i64 elements require full 64x64->128 multiply support.
let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinarySDNode_VV_VX;
  defm : VPatBinarySDNode_VV_VX;
}

// 11.11. Vector Integer Divide Instructions
defm : VPatBinarySDNode_VV_VX;
defm : VPatBinarySDNode_VV_VX;
defm : VPatBinarySDNode_VV_VX;
defm : VPatBinarySDNode_VV_VX;

// Narrowing remainder: (trunc (srem (sext a), (sext b))) can be done as a
// single-width VREM on the narrow type, because srem's result sign/magnitude
// fits in the narrow element when both inputs were sign-extended from it.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates.Predicates,
                               GetVTypePredicates.Predicates) in {
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2)))),
          (vti.Mask true_mask), (XLenVT srcvalue))),
      (!cast("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
        (vti.Vector (IMPLICIT_DEF)),
        vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatWidenBinarySDNode_VV_VX;
defm : VPatWidenBinarySDNode_VV_VX;
defm : VPatWidenBinarySDNode_VV_VX;
defm : VPatWidenBinarySDNode_VV_VX;
defm : VPatWidenBinarySDNode_VV_VX;
defm : VPatWidenBinarySDNode_VV_VX;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions.
defm : VPatMultiplyAddSDNode_VV_VX;
defm : VPatMultiplyAddSDNode_VV_VX;

// 11.14 Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMulAddSDNode_VV;
defm : VPatWidenMulAddSDNode_VX;
defm : VPatWidenMulAddSDNode_VV;
defm : VPatWidenMulAddSDNode_VX;
defm : VPatWidenMulAddSDNode_VV;
defm : VPatWidenMulAddSDNode_VX;
defm : VPatWidenMulAddSDNode_VX;

// 11.15.
// Vector Integer Merge Instructions
//
// vselect with a mask in V0 selects $rs1 where the mask is set and $rs2
// elsewhere; note the pseudo's operand order puts the false operand ($rs2)
// first. Three forms: vector-vector, vector-scalar (GPR splat), and
// vector-immediate (simm5 splat).
// NOTE(review): "GetVTypePredicates" below has lost its "<vti>" template
// argument and "!cast" its "<Instruction>" — stripped in transit.
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates.Predicates in {
    def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                   vti.RegClass:$rs2)),
              (!cast("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                 vti.AVL, vti.Log2SEW)>;
    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0),
                 vti.AVL, vti.Log2SEW)>;
    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0),
                 vti.AVL, vti.Log2SEW)>;
  }
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
// NOTE(review): <op, "PseudoV..."> argument lists stripped from these defms.
defm : VPatBinarySDNode_VV_VX_VI;
defm : VPatBinarySDNode_VV_VX_VI;
defm : VPatBinarySDNode_VV_VX;
defm : VPatBinarySDNode_VV_VX;

// 12.2. Vector Single-Width Averaging Add and Subtract
// NOTE(review): argument lists stripped here as well.
defm : VPatAVGADD_VV_VX_RM;
defm : VPatAVGADD_VV_VX_RM;

// 15. Vector Mask Instructions

// 15.1.
// Vector Mask-Register Logical Instructions
//
// Maps scalar-boolean DAG ops on mask vectors onto the VM*{N,}_MM pseudos.
// rvv_vnot (defined at the top of this file) is xor with an all-ones mask, so
// (rvv_vnot (and ...)) is vmnand, (and x, (rvv_vnot y)) is vmandn, etc.
// NOTE(review): "!cast" below has lost its "<Instruction>" template argument —
// stripped in transit.
foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
              (!cast("PseudoVMAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
              (!cast("PseudoVMOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
              (!cast("PseudoVMXOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
              (!cast("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
              (!cast("PseudoVMNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
              (!cast("PseudoVMXNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast("PseudoVMANDN_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast("PseudoVMORN_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
    def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
              (!cast("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
  }
}

// 13. Vector Floating-Point Instructions

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
// NOTE(review): the <op, "PseudoVF..."> template arguments of the FP defms in
// this section were stripped in transit — TODO restore from upstream.
defm : VPatBinaryFPSDNode_VV_VF_RM;
defm : VPatBinaryFPSDNode_VV_VF_RM;
defm : VPatBinaryFPSDNode_R_VF_RM;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM;
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM;
defm : VPatBinaryFPSDNode_VV_VF_RM;
defm : VPatBinaryFPSDNode_R_VF_RM;

// 13.5.
// Vector Widening Floating-Point Multiply Instructions
// NOTE(review): template-argument lists beginning with an identifier have been
// stripped from this chunk in transit ("!cast(" lost "<Instruction>",
// "GetVTypePredicates" lost "<fvti>", argument-less defms lost their
// <op, "PseudoV..."> lists). Code tokens are preserved as received; only line
// breaks and comments were restored.
defm : VPatWidenBinaryFPSDNode_VV_VF_RM;

// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX;
  let Predicates = GetVTypePredicates.Predicates in {
    // fma(rs1, rd, rs2) -> vfmadd: rd = rs1 * rd + rs2
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast("PseudoVFMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    // fma(rs1, rd, -rs2) -> vfmsub
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast("PseudoVFMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    // fma(-rs1, rd, -rs2) -> vfnmadd
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast("PseudoVFNMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    // fma(-rs1, rd, rs2) -> vfnmsub
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast("PseudoVFNMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
    // commutable.
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
              (!cast("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
              (!cast("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The splat might be negated.
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC">;
defm : VPatWidenFPNegMulAccSDNode_VV_VF_RM<"PseudoVFWNMACC">;
defm : VPatWidenFPMulSacSDNode_VV_VF_RM<"PseudoVFWMSAC">;
defm : VPatWidenFPNegMulSacSDNode_VV_VF_RM<"PseudoVFWNMSAC">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
              (!cast("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

    // 13.12. Vector Floating-Point Sign-Injection Instructions
    // fabs is vfsgnjx with the same register for both source operands.
    def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
              (!cast("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
              (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
    // fcopysign -> vfsgnj; a negated sign source folds into vfsgnjn.
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector vti.RegClass:$rs2))),
              (!cast("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
              (!cast("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg vti.RegClass:$rs2)))),
              (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
              (!cast("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 13.11. Vector Floating-Point MIN/MAX Instructions
// NOTE(review): <op, "PseudoVF..."> argument lists stripped from these defms.
defm : VPatBinaryFPSDNode_VV_VF;
defm : VPatBinaryFPSDNode_VV_VF;

// 13.13. Vector Floating-Point Compare Instructions
// NOTE(review): <cc, "PseudoVMF..."> argument lists stripped from these defms.
defm : VPatFPSetCCSDNode_VV_VF_FV;
defm : VPatFPSetCCSDNode_VV_VF_FV;
defm : VPatFPSetCCSDNode_VV_VF_FV;
defm : VPatFPSetCCSDNode_VV_VF_FV;
defm : VPatFPSetCCSDNode_VV_VF_FV;
defm : VPatFPSetCCSDNode_VV_VF_FV;
defm : VPatFPSetCCSDNode_VV_VF_FV;
defm : VPatFPSetCCSDNode_VV_VF_FV;

// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15.
// Vector Floating-Point Merge Instruction
//
// FP vselect: the vector-vector and +0.0-immediate cases use the integer
// VMERGE pseudos (selected under the *integer* type's predicates, via ivti);
// the scalar-splat case needs the FP VFMERGE pseudo and the FP predicates.
// NOTE(review): "GetIntVTypeInfo" / "GetVTypePredicates" / "!cast" below have
// lost their <...> template arguments — stripped in transit.
foreach fvti = AllFloatVectors in {
  defvar ivti = GetIntVTypeInfo.Vti;
  let Predicates = GetVTypePredicates.Predicates in {
    def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                    fvti.RegClass:$rs2)),
              (!cast("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                 fvti.AVL, fvti.Log2SEW)>;
    // Splat of +0.0 is the all-zero bit pattern, so vmerge.vim with imm 0 works.
    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                    (SplatFPOp (fvti.Scalar fpimm0)),
                                    fvti.RegClass:$rs2)),
              (!cast("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 fvti.RegClass:$rs2, 0, (fvti.Mask V0),
                 fvti.AVL, fvti.Log2SEW)>;
  }
  let Predicates = GetVTypePredicates.Predicates in
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp fvti.ScalarRegClass:$rs1),
                                  fvti.RegClass:$rs2)),
            (!cast("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)),
               fvti.RegClass:$rs2, (fvti.Scalar fvti.ScalarRegClass:$rs1),
               (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
// NOTE(review): the <op, "PseudoVFCVT..."> argument lists of the convert defms
// in this section were stripped in transit — TODO restore from upstream.
defm : VPatConvertFP2ISDNode_V;
defm : VPatConvertFP2ISDNode_V;
defm : VPatConvertI2FPSDNode_V_RM;
defm : VPatConvertI2FPSDNode_V_RM;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2ISDNode_V;
defm : VPatWConvertFP2ISDNode_V;
defm : VPatWConvertI2FPSDNode_V;
defm : VPatWConvertI2FPSDNode_V;

// 13.19.
// Narrowing Floating-Point/Integer Type-Convert Instructions
// NOTE(review): template-argument lists beginning with an identifier have been
// stripped from this chunk in transit (argument-less defms, "!cast(",
// "GetVTypePredicates", "GetIntVTypeInfo", "!if(!eq(fvti.Scalar, f16), ...)"
// predicate lists). Code tokens are preserved as received; only line breaks
// and comments were restored.
defm : VPatNConvertFP2ISDNode_W;
defm : VPatNConvertFP2ISDNode_W;
defm : VPatNConvertI2FPSDNode_W_RM;
defm : VPatNConvertI2FPSDNode_W_RM;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  // f16 narrowing only needs the minimal f16 extension; otherwise require the
  // predicates of both the narrow and wide types.
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates.Predicates,
                                   GetVTypePredicates.Predicates)) in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
               FRM_DYN,
               fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Splats
//===----------------------------------------------------------------------===//

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates.Predicates in
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl undef, fvti.ScalarRegClass:$rs1, srcvalue)),
            (!cast("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)),
               (fvti.Scalar fvti.ScalarRegClass:$rs1),
               fvti.AVL, fvti.Log2SEW, TA_MA)>;
  defvar ivti = GetIntVTypeInfo.Vti;
  // Splat of +0.0 is all-zero bits, so the integer vmv.v.i with imm 0 suffices
  // (selected under the integer type's predicates via ivti).
  let Predicates = GetVTypePredicates.Predicates in
  def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
            (!cast("PseudoVMV_V_I_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)), 0, fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
foreach vti = AllFloatVectors in {
  defvar vmv_f_s_inst = !cast(!strconcat("PseudoVFMV_", vti.ScalarSuffix,
                                         "_S_", vti.LMul.MX));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  let Predicates = GetVTypePredicates.Predicates in
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}