//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//

def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;

multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)> {
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
                        log2sew, TA_MA)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}

multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg reg_class,
                                        int sew = !shl(1, log2sew)> {
  defvar load_instr =
    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
  defvar store_instr =
    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");

  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1)>;
}

multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m> {
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load GPR:$rs1)),
            (load_instr (m.Mask (IMPLICIT_DEF)), GPR:$rs1, m.AVL,
                        m.Log2SEW, TA_MA)>;
  // Store
  def : Pat<(store m.Mask:$rs2, GPR:$rs1),
            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
}

class VPatBinarySDNode_VV<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg op_reg_class,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
                 instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             avl, log2sew, TA_MA)>;

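// The _RM classes and multiclasses below are for pseudos that carry a
// rounding-mode operand. They pass FRM_DYN so that RISCVInsertReadWriteCSR
// does not insert a rounding-mode change.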
class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType op_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg op_reg_class,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
                 instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_XI<SDPatternOperator vop,
                          string instruction_name,
                          string suffix,
                          ValueType result_type,
                          ValueType vop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          ComplexPattern SplatPatKind,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))),
        (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
                 instruction_name#_#suffix#_# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             xop_kind:$rs2,
             avl, log2sew, TA_MA)>;

multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
                                  list<VTypeInfo> vtilist = AllIntegerVectors,
                                  bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass,
                                SplatPat, GPR, isSEWAware>;
    }
  }
}

multiclass VPatBinarySDNode_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                     Operand ImmType = simm5>
    : VPatBinarySDNode_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              !cast<ComplexPattern>(SplatPat#_#ImmType),
                              ImmType>;
  }
}

class VPatBinarySDNode_VF<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType xop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                 instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_VF_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType vop_type,
                             ValueType xop_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg vop_reg_class,
                             DAGOperand xop_kind,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
             !if(isSEWAware,
                 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
                 instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

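// Floating-point counterparts of the integer VV/VX multiclasses: each FP
// vector type gets a vector-vector pattern plus a vector-scalar (VF) splat
// pattern.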
multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name,
                                    bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                vti.Vector, vti.Vector, vti.Scalar,
                                vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_VV_VF_RM<SDPatternOperator vop, string instruction_name,
                                       bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV_RM<vop, instruction_name,
                                   vti.Vector, vti.Vector, vti.Log2SEW,
                                   vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                   vti.Vector, vti.Vector, vti.Scalar,
                                   vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                   vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_name,
                                   bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                   !if(isSEWAware,
                       instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                       instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatBinaryFPSDNode_R_VF_RM<SDPatternOperator vop, string instruction_name,
                                      bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                   !if(isSEWAware,
                       instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                       instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
                                     CondCode cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector vti.RegClass:$rs2), cc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VV_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_VV<instruction_name, cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

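// Compare against a splatted GPR or immediate. Both operand orders of the
// setcc are matched; the swapped order reuses the same instruction with the
// inverted condition code.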
multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc,
                                               string kind,
                                               ComplexPattern SplatPatKind,
                                               DAGOperand xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                                 (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
      def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))),
                                 (vti.Vector vti.RegClass:$rs1), invcc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
    }
  }
}

multiclass VPatIntegerSetCCSDNode_VX_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VX",
                                          SplatPat, GPR>;

multiclass VPatIntegerSetCCSDNode_VI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VI",
                                          SplatPat_simm5, simm5>;

multiclass VPatIntegerSetCCSDNode_VIPlus1_Swappable<string instruction_name,
                                                    CondCode cc, CondCode invcc,
                                                    ComplexPattern splatpat_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                                 (vti.Vector (splatpat_kind simm5:$rs2)),
                                 cc)),
                (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                             vti.AVL, vti.Log2SEW)>;
      def : Pat<(vti.Mask (setcc (vti.Vector (splatpat_kind simm5:$rs2)),
                                 (vti.Vector vti.RegClass:$rs1),
                                 invcc)),
                (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                             vti.AVL, vti.Log2SEW)>;
    }
  }
}

multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
                                      string inst_name,
                                      string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<fvti>.Predicates in {
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (fvti.Vector fvti.RegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                    fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  (fvti.Vector fvti.RegClass:$rs1),
                                  cc)),
                (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                    fvti.AVL, fvti.Log2SEW)>;
    }
  }
}

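// Integer extension patterns. The VF2/VF4/VF8 suffix selects the pseudo by
// the ratio between the destination and source element widths.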
multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
                              list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    foreach op = ops in
      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                   GetVTypePredicates<fti>.Predicates) in
      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertI2FPSDNode_V_RM<SDPatternOperator vop,
                                      string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
                  (fvti.Vector (IMPLICIT_DEF)),
                  ivti.RegClass:$rs1,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertFP2ISDNode_V<SDPatternOperator vop,
                                   string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  (ivti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertI2FPSDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW)
                  (fwti.Vector (IMPLICIT_DEF)),
                  ivti.RegClass:$rs1,
                  ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertFP2ISDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  (iwti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

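// Narrowing conversions ('_W' multiclasses): the source operand has the
// widened element type and the result uses the narrower type.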
multiclass VPatNConvertI2FPSDNode_W_RM<SDPatternOperator vop,
                                       string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
                  (fvti.Vector (IMPLICIT_DEF)),
                  iwti.RegClass:$rs1,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatNConvertFP2ISDNode_W<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                    wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop,
                                             string instruction_name>
    : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>,
      VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>;

multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
          wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
          vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}
multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
          wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
          vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

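// Widening FP binary ops fold a single-use riscv_fpextend_vl of an operand
// (or a splat of an fpext'ed scalar) into the widening instruction.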
multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs1),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs1),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector (SplatFPOp (vti.Scalar vti.ScalarRegClass:$rs1))),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs1),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_E"#vti.SEW#"_TIED")
                    wti.RegClass:$rs2, vti.RegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<SDNode op,
                                                  string instruction_name>
    : VPatWidenBinaryFPSDNode_VV_VF_RM<op, instruction_name>,
      VPatWidenBinaryFPSDNode_WV_WF_RM<op, instruction_name>;

multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs1),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs2),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp
                                      (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs2),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

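// vfwnmacc computes -(vs1 * vs2) - vd, so the fma is matched with both the
// extended product operand and the addend negated.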
multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs1),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs2),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs1),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs1),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs2),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // NOTE: We choose VMADD because it has the most commuting freedom. So it
      // works best with how TwoAddressInstructionPass tries commuting.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
      // commutable.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatAVGADD_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2)),
                (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector (SplatPat (XLenVT GPR:$rs2)))),
                (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                    vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
                          FractionalGroupFloatVectors,
                          FractionalGroupBFloatVectors) in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VBF16M1, VF16M1, VF32M1, VF64M1] in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors, GroupBFloatVectors) in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach mti = AllMasks in
  let Predicates = [HasVInstructions] in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  // FIXME: The AddedComplexity here is covering up a missing matcher for
  // widening vwsub.vx which can recognize an extend folded into the
  // scalar of the splat.
  let AddedComplexity = 20 in
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                   simm5:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.2. Vector Widening Integer Add and Subtract
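// Sign-extended operands select vwadd/vwsub; zero- or any-extended operands
// select vwaddu/vwsubu.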
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;

defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;

// shl (ext v, splat 1) is a special case of widening add.
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (riscv_sext_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), VLOpFrag)),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(shl (wti.Vector (riscv_zext_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), VLOpFrag)),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 11.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
                          AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
                          AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
                 (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                 vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;

}

// 11.8. Vector Integer Comparison Instructions
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>;

defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;

defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;
// There is no VMSGE(U)_VX instruction

defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VI_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;

defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSLE", SETLT, SETGT,
                                                SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSLEU", SETULT, SETUGT,
                                                SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSGT", SETGE, SETLE,
                                                SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1_Swappable<"PseudoVMSGTU", SETUGE, SETULE,
                                                SplatPat_simm5_plus1_nonzero>;

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">;

defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", IntegerVectorsExceptI64>;

let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", I64IntegerVectors>;
}

// 11.11. Vector Integer Divide Instructions
defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM", isSEWAware=1>;

foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2)))),
          (vti.Mask true_mask), (XLenVT srcvalue))),
      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
          (vti.Vector (IMPLICIT_DEF)),
          vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                   "PseudoVWMUL">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse,
                                   "PseudoVWMULSU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse,
                                   "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions.
defm : VPatMultiplyAddSDNode_VV_VX<add, "PseudoVMADD">;
defm : VPatMultiplyAddSDNode_VV_VX<sub, "PseudoVNMSUB">;

// 11.14 Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                   vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
  }
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">;
defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">;
defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;

// 12.2. Vector Single-Width Averaging Add and Subtract
defm : VPatAVGADD_VV_VX_RM<avgfloors, 0b10>;
defm : VPatAVGADD_VV_VX_RM<avgflooru, 0b10, suffix = "U">;
defm : VPatAVGADD_VV_VX_RM<avgceils, 0b00>;
defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00, suffix = "U">;

// 15. Vector Mask Instructions

// 15.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
                   VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
    def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                   VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
  }
}

// 13. Vector Floating-Point Instructions
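// The FP arithmetic patterns below generally use SEW-aware pseudos (the
// element width is encoded in the pseudo name) and pass FRM_DYN for the
// rounding-mode operand.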

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fadd, "PseudoVFADD", isSEWAware=1>;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fsub, "PseudoVFSUB", isSEWAware=1>;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fsub, "PseudoVFRSUB", isSEWAware=1>;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fadd, "PseudoVFWADD">;
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fsub, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fmul, "PseudoVFMUL", isSEWAware=1>;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fdiv, "PseudoVFDIV", isSEWAware=1>;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fdiv, "PseudoVFRDIV", isSEWAware=1>;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_RM<fmul, "PseudoVFWMUL">;

// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX # "_E" # fvti.SEW;
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                   fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                   fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                   fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                   fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
    // commutable.
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The splat might be negated.
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC">;
defm : VPatWidenFPNegMulAccSDNode_VV_VF_RM<"PseudoVFWNMACC">;
defm : VPatWidenFPMulSacSDNode_VV_VF_RM<"PseudoVFWMSAC">;
defm : VPatWidenFPNegMulSacSDNode_VV_VF_RM<"PseudoVFWNMSAC">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TA_MA)>;

    // 13.12. Vector Floating-Point Sign-Injection Instructions
    def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector vti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg vti.RegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
              (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN", isSEWAware=1>;
defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX", isSEWAware=1>;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
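// Note: the first group below reuses the integer vmerge pseudos for FP and
// BFloat element types; presumably because a merge only copies whole elements
// bit-for-bit, only the predicates of the corresponding integer type are
// required (hence GetIntVTypeInfo).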
foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                   fvti.AVL, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                    (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                    (SplatFPOp (fvti.Scalar fpimm0)),
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
  }
}

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp fvti.ScalarRegClass:$rs1),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 fvti.RegClass:$rs2,
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
                 (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
defm : VPatConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_sint_to_fp, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_uint_to_fp, "PseudoVFCVT_F_XU_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
defm : VPatWConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertI2FPSDNode_V<any_sint_to_fp, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FPSDNode_V<any_uint_to_fp, "PseudoVFWCVT_F_XU_V">;

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
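// For reference: the RTZ pseudos used for the FP->int direction encode a
// static round-toward-zero mode and therefore take no FRM operand; only the
// int->FP (_RM) pseudos below pass FRM_DYN.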
defm : VPatNConvertFP2ISDNode_W<any_fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2ISDNode_W<any_fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_sint_to_fp, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW)
                 (fvti.Vector (IMPLICIT_DEF)),
                 fwti.RegClass:$rs1,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = [HasVInstructionsBF16] in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW)
                 (fvti.Vector (IMPLICIT_DEF)),
                 fwti.RegClass:$rs1,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
foreach vti = AllFloatVectors in {
  defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                      vti.ScalarSuffix,
                                                      "_S_", vti.LMul.MX));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}
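// Illustration (hypothetical registers): (f64 (extractelt nxv1f64:$v, 0))
// selects to a single "vfmv.f.s fa0, v8"; vfmv.f.s always reads element 0,
// so no slide is needed for index 0, and other indices never reach these
// patterns because of the custom lowering noted above.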