//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension, version
/// 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//

def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;

multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)> {
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
                        log2sew, TU_MU)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}

multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg reg_class,
                                        int sew = !shl(1, log2sew)> {
  defvar load_instr =
    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
  defvar store_instr =
    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");

  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1)>;
}

multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m> {
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load GPR:$rs1)),
            (load_instr (m.Mask (IMPLICIT_DEF)), GPR:$rs1, m.AVL,
                        m.Log2SEW, TA_MA)>;
  // Store
  def : Pat<(store m.Mask:$rs2, GPR:$rs1),
            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
}

class VPatBinarySDNode_VV<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg op_reg_class,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
                     !if(isSEWAware,
                         instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
                         instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType op_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg op_reg_class,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
                     !if(isSEWAware,
109 instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), 110 instruction_name#"_VV_"# vlmul.MX)) 111 (result_type (IMPLICIT_DEF)), 112 op_reg_class:$rs1, 113 op_reg_class:$rs2, 114 // Value to indicate no rounding mode change in 115 // RISCVInsertReadWriteCSR 116 FRM_DYN, 117 avl, log2sew, TA_MA)>; 118 119class VPatBinarySDNode_XI<SDPatternOperator vop, 120 string instruction_name, 121 string suffix, 122 ValueType result_type, 123 ValueType vop_type, 124 int log2sew, 125 LMULInfo vlmul, 126 OutPatFrag avl, 127 VReg vop_reg_class, 128 ComplexPattern SplatPatKind, 129 DAGOperand xop_kind, 130 bit isSEWAware = 0> : 131 Pat<(result_type (vop 132 (vop_type vop_reg_class:$rs1), 133 (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))), 134 (!cast<Instruction>( 135 !if(isSEWAware, 136 instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew), 137 instruction_name#_#suffix#_# vlmul.MX)) 138 (result_type (IMPLICIT_DEF)), 139 vop_reg_class:$rs1, 140 xop_kind:$rs2, 141 avl, log2sew, TA_MA)>; 142 143multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name, 144 list<VTypeInfo> vtilist = AllIntegerVectors, 145 bit isSEWAware = 0> { 146 foreach vti = vtilist in { 147 let Predicates = GetVTypePredicates<vti>.Predicates in { 148 def : VPatBinarySDNode_VV<vop, instruction_name, 149 vti.Vector, vti.Vector, vti.Log2SEW, 150 vti.LMul, vti.AVL, vti.RegClass, isSEWAware>; 151 def : VPatBinarySDNode_XI<vop, instruction_name, "VX", 152 vti.Vector, vti.Vector, vti.Log2SEW, 153 vti.LMul, vti.AVL, vti.RegClass, 154 SplatPat, GPR, isSEWAware>; 155 } 156 } 157} 158 159multiclass VPatBinarySDNode_VV_VX_VI<SDPatternOperator vop, string instruction_name, 160 Operand ImmType = simm5> 161 : VPatBinarySDNode_VV_VX<vop, instruction_name> { 162 foreach vti = AllIntegerVectors in { 163 let Predicates = GetVTypePredicates<vti>.Predicates in 164 def : VPatBinarySDNode_XI<vop, instruction_name, "VI", 165 vti.Vector, vti.Vector, vti.Log2SEW, 166 vti.LMul, vti.AVL, vti.RegClass, 167 !cast<ComplexPattern>(SplatPat#_#ImmType), 168 ImmType>; 169 } 170} 171 172class VPatBinarySDNode_VF<SDPatternOperator vop, 173 string instruction_name, 174 ValueType result_type, 175 ValueType vop_type, 176 ValueType xop_type, 177 int log2sew, 178 LMULInfo vlmul, 179 OutPatFrag avl, 180 VReg vop_reg_class, 181 DAGOperand xop_kind, 182 bit isSEWAware = 0> : 183 Pat<(result_type (vop (vop_type vop_reg_class:$rs1), 184 (vop_type (SplatFPOp xop_kind:$rs2)))), 185 (!cast<Instruction>( 186 !if(isSEWAware, 187 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), 188 instruction_name#"_"#vlmul.MX)) 189 (result_type (IMPLICIT_DEF)), 190 vop_reg_class:$rs1, 191 (xop_type xop_kind:$rs2), 192 avl, log2sew, TA_MA)>; 193 194class VPatBinarySDNode_VF_RM<SDPatternOperator vop, 195 string instruction_name, 196 ValueType result_type, 197 ValueType vop_type, 198 ValueType xop_type, 199 int log2sew, 200 LMULInfo vlmul, 201 OutPatFrag avl, 202 VReg vop_reg_class, 203 DAGOperand xop_kind, 204 bit isSEWAware = 0> : 205 Pat<(result_type (vop (vop_type vop_reg_class:$rs1), 206 (vop_type (SplatFPOp xop_kind:$rs2)))), 207 (!cast<Instruction>( 208 !if(isSEWAware, 209 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), 210 instruction_name#"_"#vlmul.MX)) 211 (result_type (IMPLICIT_DEF)), 212 vop_reg_class:$rs1, 213 (xop_type xop_kind:$rs2), 214 // Value to indicate no rounding mode change in 215 // RISCVInsertReadWriteCSR 216 FRM_DYN, 217 avl, log2sew, TA_MA)>; 218 219multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name, 220 
bit isSEWAware = 0> { 221 foreach vti = AllFloatVectors in { 222 let Predicates = GetVTypePredicates<vti>.Predicates in { 223 def : VPatBinarySDNode_VV<vop, instruction_name, 224 vti.Vector, vti.Vector, vti.Log2SEW, 225 vti.LMul, vti.AVL, vti.RegClass, isSEWAware>; 226 def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 227 vti.Vector, vti.Vector, vti.Scalar, 228 vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass, 229 vti.ScalarRegClass, isSEWAware>; 230 } 231 } 232} 233 234multiclass VPatBinaryFPSDNode_VV_VF_RM<SDPatternOperator vop, string instruction_name, 235 bit isSEWAware = 0> { 236 foreach vti = AllFloatVectors in { 237 let Predicates = GetVTypePredicates<vti>.Predicates in { 238 def : VPatBinarySDNode_VV_RM<vop, instruction_name, 239 vti.Vector, vti.Vector, vti.Log2SEW, 240 vti.LMul, vti.AVL, vti.RegClass, isSEWAware>; 241 def : VPatBinarySDNode_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 242 vti.Vector, vti.Vector, vti.Scalar, 243 vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass, 244 vti.ScalarRegClass, isSEWAware>; 245 } 246 } 247} 248 249multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_name, 250 bit isSEWAware = 0> { 251 foreach fvti = AllFloatVectors in 252 let Predicates = GetVTypePredicates<fvti>.Predicates in 253 def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), 254 (fvti.Vector fvti.RegClass:$rs1))), 255 (!cast<Instruction>( 256 !if(isSEWAware, 257 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, 258 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) 259 (fvti.Vector (IMPLICIT_DEF)), 260 fvti.RegClass:$rs1, 261 (fvti.Scalar fvti.ScalarRegClass:$rs2), 262 fvti.AVL, fvti.Log2SEW, TA_MA)>; 263} 264 265multiclass VPatBinaryFPSDNode_R_VF_RM<SDPatternOperator vop, string instruction_name, 266 bit isSEWAware = 0> { 267 foreach fvti = AllFloatVectors in 268 let Predicates = GetVTypePredicates<fvti>.Predicates in 269 def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), 270 (fvti.Vector fvti.RegClass:$rs1))), 271 (!cast<Instruction>( 272 !if(isSEWAware, 273 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, 274 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) 275 (fvti.Vector (IMPLICIT_DEF)), 276 fvti.RegClass:$rs1, 277 (fvti.Scalar fvti.ScalarRegClass:$rs2), 278 // Value to indicate no rounding mode change in 279 // RISCVInsertReadWriteCSR 280 FRM_DYN, 281 fvti.AVL, fvti.Log2SEW, TA_MA)>; 282} 283 284multiclass VPatIntegerSetCCSDNode_VV<string instruction_name, 285 CondCode cc> { 286 foreach vti = AllIntegerVectors in { 287 defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX); 288 let Predicates = GetVTypePredicates<vti>.Predicates in 289 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), 290 (vti.Vector vti.RegClass:$rs2), cc)), 291 (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, 292 vti.Log2SEW)>; 293 } 294} 295 296multiclass VPatIntegerSetCCSDNode_VV_Swappable<string instruction_name, 297 CondCode cc, CondCode invcc> 298 : VPatIntegerSetCCSDNode_VV<instruction_name, cc> { 299 foreach vti = AllIntegerVectors in { 300 defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX); 301 let Predicates = GetVTypePredicates<vti>.Predicates in 302 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2), 303 (vti.Vector vti.RegClass:$rs1), invcc)), 304 (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, 305 vti.Log2SEW)>; 306 } 307} 308 309multiclass VPatIntegerSetCCSDNode_XI< 
310 string instruction_name, 311 CondCode cc, 312 string kind, 313 ComplexPattern SplatPatKind, 314 DAGOperand xop_kind> { 315 foreach vti = AllIntegerVectors in { 316 defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX); 317 let Predicates = GetVTypePredicates<vti>.Predicates in 318 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), 319 (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)), 320 (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>; 321 } 322} 323 324multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name, 325 CondCode cc, CondCode invcc, 326 string kind, 327 ComplexPattern SplatPatKind, 328 DAGOperand xop_kind> 329 : VPatIntegerSetCCSDNode_XI<instruction_name, cc, kind, SplatPatKind, 330 xop_kind> { 331 foreach vti = AllIntegerVectors in { 332 defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX); 333 let Predicates = GetVTypePredicates<vti>.Predicates in { 334 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), 335 (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)), 336 (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>; 337 def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), 338 (vti.Vector vti.RegClass:$rs1), invcc)), 339 (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>; 340 } 341 } 342} 343 344multiclass VPatIntegerSetCCSDNode_VX_Swappable<string instruction_name, 345 CondCode cc, CondCode invcc> 346 : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VX", 347 SplatPat, GPR>; 348 349multiclass VPatIntegerSetCCSDNode_VI<string instruction_name, CondCode cc> 350 : VPatIntegerSetCCSDNode_XI<instruction_name, cc, "VI", SplatPat_simm5, simm5>; 351 352multiclass VPatIntegerSetCCSDNode_VIPlus1<string instruction_name, CondCode cc, 353 ComplexPattern splatpat_kind> { 354 foreach vti = AllIntegerVectors in { 355 defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX); 356 let Predicates = GetVTypePredicates<vti>.Predicates in 357 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), 358 (vti.Vector (splatpat_kind simm5:$rs2)), 359 cc)), 360 (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2), 361 vti.AVL, vti.Log2SEW)>; 362 } 363} 364 365multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc, 366 string inst_name, 367 string swapped_op_inst_name> { 368 foreach fvti = AllFloatVectors in { 369 let Predicates = GetVTypePredicates<fvti>.Predicates in { 370 def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1), 371 (fvti.Vector fvti.RegClass:$rs2), 372 cc)), 373 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX) 374 fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>; 375 def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1), 376 (SplatFPOp fvti.ScalarRegClass:$rs2), 377 cc)), 378 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 379 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 380 fvti.AVL, fvti.Log2SEW)>; 381 def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2), 382 (fvti.Vector fvti.RegClass:$rs1), 383 cc)), 384 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 385 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 386 fvti.AVL, fvti.Log2SEW)>; 387 } 388 } 389} 390 391multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix, 392 list <VTypeInfoToFraction> fraction_list> { 393 foreach vtiTofti = fraction_list in { 394 defvar vti = vtiTofti.Vti; 395 defvar fti = 
vtiTofti.Fti; 396 foreach op = ops in 397 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 398 GetVTypePredicates<fti>.Predicates) in 399 def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))), 400 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX) 401 (vti.Vector (IMPLICIT_DEF)), 402 fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TU_MU)>; 403 } 404} 405 406multiclass VPatConvertI2FPSDNode_V_RM<SDPatternOperator vop, 407 string instruction_name> { 408 foreach fvti = AllFloatVectors in { 409 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 410 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 411 GetVTypePredicates<ivti>.Predicates) in 412 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))), 413 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX) 414 (fvti.Vector (IMPLICIT_DEF)), 415 ivti.RegClass:$rs1, 416 // Value to indicate no rounding mode change in 417 // RISCVInsertReadWriteCSR 418 FRM_DYN, 419 fvti.AVL, fvti.Log2SEW, TU_MU)>; 420 } 421} 422 423multiclass VPatConvertFP2ISDNode_V<SDPatternOperator vop, 424 string instruction_name> { 425 foreach fvti = AllFloatVectors in { 426 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 427 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 428 GetVTypePredicates<ivti>.Predicates) in 429 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))), 430 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX) 431 (ivti.Vector (IMPLICIT_DEF)), 432 fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TU_MU)>; 433 } 434} 435 436multiclass VPatWConvertI2FPSDNode_V<SDPatternOperator vop, 437 string instruction_name> { 438 foreach vtiToWti = AllWidenableIntToFloatVectors in { 439 defvar ivti = vtiToWti.Vti; 440 defvar fwti = vtiToWti.Wti; 441 let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates, 442 GetVTypePredicates<fwti>.Predicates) in 443 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))), 444 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX) 445 (fwti.Vector (IMPLICIT_DEF)), 446 ivti.RegClass:$rs1, 447 ivti.AVL, ivti.Log2SEW, TU_MU)>; 448 } 449} 450 451multiclass VPatWConvertFP2ISDNode_V<SDPatternOperator vop, 452 string instruction_name> { 453 foreach fvtiToFWti = AllWidenableFloatVectors in { 454 defvar fvti = fvtiToFWti.Vti; 455 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 456 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 457 GetVTypePredicates<iwti>.Predicates) in 458 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))), 459 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX) 460 (iwti.Vector (IMPLICIT_DEF)), 461 fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TU_MU)>; 462 } 463} 464 465multiclass VPatNConvertI2FPSDNode_W_RM<SDPatternOperator vop, 466 string instruction_name> { 467 foreach fvtiToFWti = AllWidenableFloatVectors in { 468 defvar fvti = fvtiToFWti.Vti; 469 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 470 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 471 GetVTypePredicates<iwti>.Predicates) in 472 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))), 473 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX) 474 (fvti.Vector (IMPLICIT_DEF)), 475 iwti.RegClass:$rs1, 476 // Value to indicate no rounding mode change in 477 // RISCVInsertReadWriteCSR 478 FRM_DYN, 479 fvti.AVL, fvti.Log2SEW, TU_MU)>; 480 } 481} 482 483multiclass VPatNConvertFP2ISDNode_W<SDPatternOperator vop, 484 string instruction_name> { 485 foreach vtiToWti = AllWidenableIntToFloatVectors in { 486 defvar vti = vtiToWti.Vti; 487 
defvar fwti = vtiToWti.Wti; 488 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 489 GetVTypePredicates<fwti>.Predicates) in 490 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))), 491 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX) 492 (vti.Vector (IMPLICIT_DEF)), 493 fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; 494 } 495} 496 497multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2, 498 string instruction_name> { 499 foreach vtiToWti = AllWidenableIntVectors in { 500 defvar vti = vtiToWti.Vti; 501 defvar wti = vtiToWti.Wti; 502 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 503 GetVTypePredicates<wti>.Predicates) in { 504 def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))), 505 (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))), 506 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 507 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 508 vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; 509 def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))), 510 (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))), 511 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX) 512 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 513 GPR:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; 514 } 515 } 516} 517 518multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop, 519 string instruction_name> { 520 foreach vtiToWti = AllWidenableIntVectors in { 521 defvar vti = vtiToWti.Vti; 522 defvar wti = vtiToWti.Wti; 523 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 524 GetVTypePredicates<wti>.Predicates) in { 525 def : Pat<(op (wti.Vector wti.RegClass:$rs2), 526 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))), 527 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED") 528 wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, 529 TAIL_AGNOSTIC)>; 530 def : Pat<(op (wti.Vector wti.RegClass:$rs2), 531 (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))), 532 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 533 (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1, 534 vti.AVL, vti.Log2SEW, TU_MU)>; 535 } 536 } 537} 538 539multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop, 540 string instruction_name> 541 : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>, 542 VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>; 543 544multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> { 545 foreach vtiToWti = AllWidenableIntVectors in { 546 defvar vti = vtiToWti.Vti; 547 defvar wti = vtiToWti.Wti; 548 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 549 GetVTypePredicates<wti>.Predicates) in 550 def : Pat< 551 (add (wti.Vector wti.RegClass:$rd), 552 (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))), 553 (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))), 554 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 555 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 556 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC 557 )>; 558 } 559} 560multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string instruction_name> { 561 foreach vtiToWti = AllWidenableIntVectors in { 562 defvar vti = vtiToWti.Vti; 563 defvar wti = vtiToWti.Wti; 564 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 565 GetVTypePredicates<wti>.Predicates) in 566 def : Pat< 567 (add (wti.Vector wti.RegClass:$rd), 568 (mul_oneuse 
(wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))), 569 (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))), 570 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX) 571 wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2, 572 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC 573 )>; 574 } 575} 576 577multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> { 578 foreach vtiToWti = AllWidenableFloatVectors in { 579 defvar vti = vtiToWti.Vti; 580 defvar wti = vtiToWti.Wti; 581 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 582 GetVTypePredicates<wti>.Predicates) in { 583 def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse 584 (vti.Vector vti.RegClass:$rs2), 585 (vti.Mask true_mask), (XLenVT srcvalue))), 586 (wti.Vector (riscv_fpextend_vl_oneuse 587 (vti.Vector vti.RegClass:$rs1), 588 (vti.Mask true_mask), (XLenVT srcvalue)))), 589 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 590 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 591 vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; 592 def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse 593 (vti.Vector vti.RegClass:$rs2), 594 (vti.Mask true_mask), (XLenVT srcvalue))), 595 (wti.Vector (riscv_fpextend_vl_oneuse 596 (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 597 (vti.Mask true_mask), (XLenVT srcvalue)))), 598 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 599 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 600 vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; 601 def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse 602 (vti.Vector vti.RegClass:$rs2), 603 (vti.Mask true_mask), (XLenVT srcvalue))), 604 (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))), 605 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 606 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 607 vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; 608 } 609 } 610} 611 612multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name> { 613 foreach vtiToWti = AllWidenableFloatVectors in { 614 defvar vti = vtiToWti.Vti; 615 defvar wti = vtiToWti.Wti; 616 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 617 GetVTypePredicates<wti>.Predicates) in { 618 def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse 619 (vti.Vector vti.RegClass:$rs2), 620 (vti.Mask true_mask), (XLenVT srcvalue))), 621 (wti.Vector (riscv_fpextend_vl_oneuse 622 (vti.Vector vti.RegClass:$rs1), 623 (vti.Mask true_mask), (XLenVT srcvalue)))), 624 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 625 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 626 vti.RegClass:$rs1, 627 // Value to indicate no rounding mode change in 628 // RISCVInsertReadWriteCSR 629 FRM_DYN, 630 vti.AVL, vti.Log2SEW, TU_MU)>; 631 def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse 632 (vti.Vector vti.RegClass:$rs2), 633 (vti.Mask true_mask), (XLenVT srcvalue))), 634 (wti.Vector (riscv_fpextend_vl_oneuse 635 (vti.Vector (SplatFPOp (vti.Scalar vti.ScalarRegClass:$rs1))), 636 (vti.Mask true_mask), (XLenVT srcvalue)))), 637 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 638 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 639 vti.ScalarRegClass:$rs1, 640 // Value to indicate no rounding mode change in 641 // RISCVInsertReadWriteCSR 642 FRM_DYN, 643 vti.AVL, vti.Log2SEW, TU_MU)>; 644 def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse 645 (vti.Vector vti.RegClass:$rs2), 646 (vti.Mask true_mask), (XLenVT srcvalue))), 647 (wti.Vector (SplatFPOp (fpext_oneuse 
(vti.Scalar vti.ScalarRegClass:$rs1))))), 648 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 649 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, 650 vti.ScalarRegClass:$rs1, 651 // Value to indicate no rounding mode change in 652 // RISCVInsertReadWriteCSR 653 FRM_DYN, 654 vti.AVL, vti.Log2SEW, TU_MU)>; 655 } 656 } 657} 658 659multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name> { 660 foreach vtiToWti = AllWidenableFloatVectors in { 661 defvar vti = vtiToWti.Vti; 662 defvar wti = vtiToWti.Wti; 663 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 664 GetVTypePredicates<wti>.Predicates) in { 665 def : Pat<(op (wti.Vector wti.RegClass:$rs2), 666 (wti.Vector (riscv_fpextend_vl_oneuse 667 (vti.Vector vti.RegClass:$rs1), 668 (vti.Mask true_mask), (XLenVT srcvalue)))), 669 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED") 670 wti.RegClass:$rs2, vti.RegClass:$rs1, 671 // Value to indicate no rounding mode change in 672 // RISCVInsertReadWriteCSR 673 FRM_DYN, 674 vti.AVL, vti.Log2SEW, 675 TAIL_AGNOSTIC)>; 676 def : Pat<(op (wti.Vector wti.RegClass:$rs2), 677 (wti.Vector (riscv_fpextend_vl_oneuse 678 (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), 679 (vti.Mask true_mask), (XLenVT srcvalue)))), 680 (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX) 681 (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, 682 vti.ScalarRegClass:$rs1, 683 // Value to indicate no rounding mode change in 684 // RISCVInsertReadWriteCSR 685 FRM_DYN, 686 vti.AVL, vti.Log2SEW, TU_MU)>; 687 def : Pat<(op (wti.Vector wti.RegClass:$rs2), 688 (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), 689 (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX) 690 (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, 691 vti.ScalarRegClass:$rs1, 692 // Value to indicate no rounding mode change in 693 // RISCVInsertReadWriteCSR 694 FRM_DYN, 695 vti.AVL, vti.Log2SEW, TU_MU)>; 696 } 697 } 698} 699 700multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<SDNode op, 701 string instruction_name> 702 : VPatWidenBinaryFPSDNode_VV_VF_RM<op, instruction_name>, 703 VPatWidenBinaryFPSDNode_WV_WF_RM<op, instruction_name>; 704 705multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> { 706 foreach vtiToWti = AllWidenableFloatVectors in { 707 defvar vti = vtiToWti.Vti; 708 defvar wti = vtiToWti.Wti; 709 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 710 GetVTypePredicates<wti>.Predicates) in { 711 def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse 712 (vti.Vector vti.RegClass:$rs1), 713 (vti.Mask true_mask), (XLenVT srcvalue))), 714 (wti.Vector (riscv_fpextend_vl_oneuse 715 (vti.Vector vti.RegClass:$rs2), 716 (vti.Mask true_mask), (XLenVT srcvalue))), 717 (wti.Vector wti.RegClass:$rd)), 718 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 719 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 720 // Value to indicate no rounding mode change in 721 // RISCVInsertReadWriteCSR 722 FRM_DYN, 723 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 724 def : Pat<(fma (wti.Vector (SplatFPOp 725 (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), 726 (wti.Vector (riscv_fpextend_vl_oneuse 727 (vti.Vector vti.RegClass:$rs2), 728 (vti.Mask true_mask), (XLenVT srcvalue))), 729 (wti.Vector wti.RegClass:$rd)), 730 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 731 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 732 // Value to indicate no 
rounding mode change in 733 // RISCVInsertReadWriteCSR 734 FRM_DYN, 735 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 736 } 737 } 738} 739 740multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> { 741 foreach vtiToWti = AllWidenableFloatVectors in { 742 defvar vti = vtiToWti.Vti; 743 defvar wti = vtiToWti.Wti; 744 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 745 GetVTypePredicates<wti>.Predicates) in { 746 def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse 747 (vti.Vector vti.RegClass:$rs1), 748 (vti.Mask true_mask), (XLenVT srcvalue)))), 749 (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), 750 (vti.Mask true_mask), (XLenVT srcvalue)), 751 (fneg wti.RegClass:$rd)), 752 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 753 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 754 // Value to indicate no rounding mode change in 755 // RISCVInsertReadWriteCSR 756 FRM_DYN, 757 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 758 def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))), 759 (fneg (wti.Vector (riscv_fpextend_vl_oneuse 760 (vti.Vector vti.RegClass:$rs2), 761 (vti.Mask true_mask), (XLenVT srcvalue)))), 762 (fneg wti.RegClass:$rd)), 763 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 764 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 765 // Value to indicate no rounding mode change in 766 // RISCVInsertReadWriteCSR 767 FRM_DYN, 768 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 769 def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), 770 (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), 771 (vti.Mask true_mask), (XLenVT srcvalue)), 772 (fneg wti.RegClass:$rd)), 773 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 774 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 775 // Value to indicate no rounding mode change in 776 // RISCVInsertReadWriteCSR 777 FRM_DYN, 778 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 779 } 780 } 781} 782 783multiclass VPatWidenFPMulSacSDNode_VV_VF_RM<string instruction_name> { 784 foreach vtiToWti = AllWidenableFloatVectors in { 785 defvar vti = vtiToWti.Vti; 786 defvar wti = vtiToWti.Wti; 787 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 788 GetVTypePredicates<wti>.Predicates) in { 789 def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse 790 (vti.Vector vti.RegClass:$rs1), 791 (vti.Mask true_mask), (XLenVT srcvalue))), 792 (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), 793 (vti.Mask true_mask), (XLenVT srcvalue)), 794 (fneg wti.RegClass:$rd)), 795 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 796 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 797 // Value to indicate no rounding mode change in 798 // RISCVInsertReadWriteCSR 799 FRM_DYN, 800 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 801 def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), 802 (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), 803 (vti.Mask true_mask), (XLenVT srcvalue)), 804 (fneg wti.RegClass:$rd)), 805 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 806 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 807 // Value to indicate no rounding mode change in 808 // RISCVInsertReadWriteCSR 809 FRM_DYN, 810 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 811 } 812 } 813} 814 815multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> { 816 foreach vtiToWti = 
AllWidenableFloatVectors in { 817 defvar vti = vtiToWti.Vti; 818 defvar wti = vtiToWti.Wti; 819 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 820 GetVTypePredicates<wti>.Predicates) in { 821 def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse 822 (vti.Vector vti.RegClass:$rs1), 823 (vti.Mask true_mask), (XLenVT srcvalue)))), 824 (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), 825 (vti.Mask true_mask), (XLenVT srcvalue)), 826 wti.RegClass:$rd), 827 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX) 828 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 829 // Value to indicate no rounding mode change in 830 // RISCVInsertReadWriteCSR 831 FRM_DYN, 832 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 833 def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), 834 (fneg (wti.Vector (riscv_fpextend_vl_oneuse 835 (vti.Vector vti.RegClass:$rs2), 836 (vti.Mask true_mask), (XLenVT srcvalue)))), 837 wti.RegClass:$rd), 838 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 839 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 840 // Value to indicate no rounding mode change in 841 // RISCVInsertReadWriteCSR 842 FRM_DYN, 843 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 844 def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), 845 (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), 846 (vti.Mask true_mask), (XLenVT srcvalue)), 847 wti.RegClass:$rd), 848 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 849 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 850 // Value to indicate no rounding mode change in 851 // RISCVInsertReadWriteCSR 852 FRM_DYN, 853 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 854 } 855 } 856} 857 858multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> { 859 foreach vti = AllIntegerVectors in { 860 defvar suffix = vti.LMul.MX; 861 let Predicates = GetVTypePredicates<vti>.Predicates in { 862 // NOTE: We choose VMADD because it has the most commuting freedom. So it 863 // works best with how TwoAddressInstructionPass tries commuting. 864 def : Pat<(vti.Vector (op vti.RegClass:$rs2, 865 (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))), 866 (!cast<Instruction>(instruction_name#"_VV_"# suffix) 867 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 868 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 869 // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally 870 // commutable. 871 def : Pat<(vti.Vector (op vti.RegClass:$rs2, 872 (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))), 873 (!cast<Instruction>(instruction_name#"_VX_" # suffix) 874 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 875 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; 876 } 877 } 878} 879 880//===----------------------------------------------------------------------===// 881// Patterns. 882//===----------------------------------------------------------------------===// 883 884// 7.4. 
Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
                          FractionalGroupFloatVectors) in
  let Predicates = GetVTypePredicates<vti>.Predicates in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
  let Predicates = GetVTypePredicates<vti>.Predicates in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
  let Predicates = GetVTypePredicates<vti>.Predicates in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach mti = AllMasks in
  let Predicates = [HasVInstructions] in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands.
foreach vti = AllIntegerVectors in {
  // FIXME: The AddedComplexity here is covering up a missing matcher for
  // widening vwsub.vx which can recognize an extend folded into the
  // scalar of the splat.
  let AddedComplexity = 20 in
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                   vti.AVL, vti.Log2SEW, TU_MU)>;
    def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                   simm5:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>;
  }
}

// 11.2. Vector Widening Integer Add and Subtract
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;

defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;

// shl (ext v, splat 1) is a special case of widening add.
938foreach vtiToWti = AllWidenableIntVectors in { 939 defvar vti = vtiToWti.Vti; 940 defvar wti = vtiToWti.Wti; 941 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 942 GetVTypePredicates<wti>.Predicates) in { 943 def : Pat<(shl (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))), 944 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), 945 (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX) 946 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, 947 vti.AVL, vti.Log2SEW, TU_MU)>; 948 def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))), 949 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), 950 (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX) 951 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, 952 vti.AVL, vti.Log2SEW, TU_MU)>; 953 def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))), 954 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), 955 (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX) 956 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, 957 vti.AVL, vti.Log2SEW, TU_MU)>; 958 } 959} 960 961// 11.3. Vector Integer Extension 962defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2", 963 AllFractionableVF2IntVectors>; 964defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2", 965 AllFractionableVF2IntVectors>; 966defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4", 967 AllFractionableVF4IntVectors>; 968defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4", 969 AllFractionableVF4IntVectors>; 970defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8", 971 AllFractionableVF8IntVectors>; 972defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8", 973 AllFractionableVF8IntVectors>; 974 975// 11.5. Vector Bitwise Logical Instructions 976defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">; 977defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">; 978defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">; 979 980// 11.6. Vector Single-Width Bit Shift Instructions 981defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>; 982defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>; 983defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>; 984 985foreach vti = AllIntegerVectors in { 986 // Emit shift by 1 as an add since it might be faster. 987 let Predicates = GetVTypePredicates<vti>.Predicates in 988 def : Pat<(shl (vti.Vector vti.RegClass:$rs1), 989 (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))), 990 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 991 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, 992 vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; 993 994} 995 996// 11.8. 
Vector Integer Comparison Instructions 997defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>; 998defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>; 999 1000defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>; 1001defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>; 1002defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>; 1003defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>; 1004 1005defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>; 1006defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>; 1007defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>; 1008defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>; 1009defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>; 1010defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>; 1011defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>; 1012defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>; 1013// There is no VMSGE(U)_VX instruction 1014 1015defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSEQ", SETEQ>; 1016defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSNE", SETNE>; 1017defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLE", SETLE>; 1018defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLEU", SETULE>; 1019defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGT", SETGT>; 1020defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGTU", SETUGT>; 1021 1022defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLE", SETLT, 1023 SplatPat_simm5_plus1>; 1024defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLEU", SETULT, 1025 SplatPat_simm5_plus1_nonzero>; 1026defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGT", SETGE, 1027 SplatPat_simm5_plus1>; 1028defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGTU", SETUGE, 1029 SplatPat_simm5_plus1_nonzero>; 1030 1031// 11.9. Vector Integer Min/Max Instructions 1032defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">; 1033defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">; 1034defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">; 1035defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">; 1036 1037// 11.10. Vector Single-Width Integer Multiply Instructions 1038defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">; 1039 1040defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", IntegerVectorsExceptI64>; 1041defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", IntegerVectorsExceptI64>; 1042 1043let Predicates = [HasVInstructionsFullMultiply] in { 1044 defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", I64IntegerVectors>; 1045 defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", I64IntegerVectors>; 1046} 1047 1048// 11.11. Vector Integer Divide Instructions 1049defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU", isSEWAware=1>; 1050defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV", isSEWAware=1>; 1051defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU", isSEWAware=1>; 1052defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM", isSEWAware=1>; 1053 1054// 11.12. 
Vector Widening Integer Multiply Instructions 1055defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse, 1056 "PseudoVWMUL">; 1057defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse, 1058 "PseudoVWMULU">; 1059defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse, 1060 "PseudoVWMULU">; 1061defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse, 1062 "PseudoVWMULU">; 1063defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse, 1064 "PseudoVWMULSU">; 1065defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse, 1066 "PseudoVWMULSU">; 1067 1068// 11.13 Vector Single-Width Integer Multiply-Add Instructions. 1069defm : VPatMultiplyAddSDNode_VV_VX<add, "PseudoVMADD">; 1070defm : VPatMultiplyAddSDNode_VV_VX<sub, "PseudoVNMSUB">; 1071 1072// 11.14 Vector Widening Integer Multiply-Add Instructions 1073defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">; 1074defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">; 1075defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">; 1076defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">; 1077defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">; 1078defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">; 1079defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">; 1080 1081// 11.15. Vector Integer Merge Instructions 1082foreach vti = AllIntegerVectors in { 1083 let Predicates = GetVTypePredicates<vti>.Predicates in { 1084 def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1, 1085 vti.RegClass:$rs2)), 1086 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX) 1087 (vti.Vector (IMPLICIT_DEF)), 1088 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0), 1089 vti.AVL, vti.Log2SEW)>; 1090 1091 def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1), 1092 vti.RegClass:$rs2)), 1093 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX) 1094 (vti.Vector (IMPLICIT_DEF)), 1095 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>; 1096 1097 def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1), 1098 vti.RegClass:$rs2)), 1099 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX) 1100 (vti.Vector (IMPLICIT_DEF)), 1101 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>; 1102 } 1103} 1104 1105// 12. Vector Fixed-Point Arithmetic Instructions 1106 1107// 12.1. Vector Single-Width Saturating Add and Subtract 1108defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">; 1109defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">; 1110defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">; 1111defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">; 1112 1113// 15. Vector Mask Instructions 1114 1115// 15.1. 
Vector Mask-Register Logical Instructions 1116foreach mti = AllMasks in { 1117 let Predicates = [HasVInstructions] in { 1118 def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)), 1119 (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX) 1120 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1121 def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)), 1122 (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX) 1123 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1124 def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)), 1125 (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX) 1126 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1127 1128 def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))), 1129 (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX) 1130 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1131 def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))), 1132 (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX) 1133 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1134 def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))), 1135 (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX) 1136 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1137 1138 def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))), 1139 (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX) 1140 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1141 def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))), 1142 (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX) 1143 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 1144 1145 // Handle rvv_vnot the same as the vmnot.m pseudoinstruction. 1146 def : Pat<(mti.Mask (rvv_vnot VR:$rs)), 1147 (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX) 1148 VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>; 1149 } 1150} 1151 1152// 13. Vector Floating-Point Instructions 1153 1154// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 1155defm : VPatBinaryFPSDNode_VV_VF_RM<any_fadd, "PseudoVFADD">; 1156defm : VPatBinaryFPSDNode_VV_VF_RM<any_fsub, "PseudoVFSUB">; 1157defm : VPatBinaryFPSDNode_R_VF_RM<any_fsub, "PseudoVFRSUB">; 1158 1159// 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1160defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fadd, "PseudoVFWADD">; 1161defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fsub, "PseudoVFWSUB">; 1162 1163// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 1164defm : VPatBinaryFPSDNode_VV_VF_RM<any_fmul, "PseudoVFMUL">; 1165defm : VPatBinaryFPSDNode_VV_VF_RM<any_fdiv, "PseudoVFDIV", isSEWAware=1>; 1166defm : VPatBinaryFPSDNode_R_VF_RM<any_fdiv, "PseudoVFRDIV", isSEWAware=1>; 1167 1168// 13.5. Vector Widening Floating-Point Multiply Instructions 1169defm : VPatWidenBinaryFPSDNode_VV_VF_RM<fmul, "PseudoVFWMUL">; 1170 1171// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions. 1172foreach fvti = AllFloatVectors in { 1173 // NOTE: We choose VFMADD because it has the most commuting freedom. So it 1174 // works best with how TwoAddressInstructionPass tries commuting. 
1175 defvar suffix = fvti.LMul.MX; 1176 let Predicates = GetVTypePredicates<fvti>.Predicates in { 1177 def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd, 1178 fvti.RegClass:$rs2)), 1179 (!cast<Instruction>("PseudoVFMADD_VV_"# suffix) 1180 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 1181 // Value to indicate no rounding mode change in 1182 // RISCVInsertReadWriteCSR 1183 FRM_DYN, 1184 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1185 def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd, 1186 (fneg fvti.RegClass:$rs2))), 1187 (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix) 1188 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 1189 // Value to indicate no rounding mode change in 1190 // RISCVInsertReadWriteCSR 1191 FRM_DYN, 1192 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1193 def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd, 1194 (fneg fvti.RegClass:$rs2))), 1195 (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix) 1196 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 1197 // Value to indicate no rounding mode change in 1198 // RISCVInsertReadWriteCSR 1199 FRM_DYN, 1200 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1201 def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd, 1202 fvti.RegClass:$rs2)), 1203 (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix) 1204 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 1205 // Value to indicate no rounding mode change in 1206 // RISCVInsertReadWriteCSR 1207 FRM_DYN, 1208 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1209 1210 // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally 1211 // commutable. 1212 def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1), 1213 fvti.RegClass:$rd, fvti.RegClass:$rs2)), 1214 (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix) 1215 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 1216 // Value to indicate no rounding mode change in 1217 // RISCVInsertReadWriteCSR 1218 FRM_DYN, 1219 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1220 def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1), 1221 fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))), 1222 (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix) 1223 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 1224 // Value to indicate no rounding mode change in 1225 // RISCVInsertReadWriteCSR 1226 FRM_DYN, 1227 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1228 1229 def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1), 1230 (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))), 1231 (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix) 1232 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 1233 // Value to indicate no rounding mode change in 1234 // RISCVInsertReadWriteCSR 1235 FRM_DYN, 1236 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1237 def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1), 1238 (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)), 1239 (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix) 1240 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 1241 // Value to indicate no rounding mode change in 1242 // RISCVInsertReadWriteCSR 1243 FRM_DYN, 1244 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1245 1246 // The splat might be negated. 
1247 def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)), 1248 fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))), 1249 (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix) 1250 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 1251 // Value to indicate no rounding mode change in 1252 // RISCVInsertReadWriteCSR 1253 FRM_DYN, 1254 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1255 def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)), 1256 fvti.RegClass:$rd, fvti.RegClass:$rs2)), 1257 (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix) 1258 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 1259 // Value to indicate no rounding mode change in 1260 // RISCVInsertReadWriteCSR 1261 FRM_DYN, 1262 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1263 } 1264} 1265 1266// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 1267defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC">; 1268defm : VPatWidenFPNegMulAccSDNode_VV_VF_RM<"PseudoVFWNMACC">; 1269defm : VPatWidenFPMulSacSDNode_VV_VF_RM<"PseudoVFWMSAC">; 1270defm : VPatWidenFPNegMulSacSDNode_VV_VF_RM<"PseudoVFWNMSAC">; 1271 1272foreach vti = AllFloatVectors in { 1273 let Predicates = GetVTypePredicates<vti>.Predicates in { 1274 // 13.8. Vector Floating-Point Square-Root Instruction 1275 def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)), 1276 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW) 1277 (vti.Vector (IMPLICIT_DEF)), 1278 vti.RegClass:$rs2, 1279 // Value to indicate no rounding mode change in 1280 // RISCVInsertReadWriteCSR 1281 FRM_DYN, 1282 vti.AVL, vti.Log2SEW, TU_MU)>; 1283 1284 // 13.12. Vector Floating-Point Sign-Injection Instructions 1285 def : Pat<(fabs (vti.Vector vti.RegClass:$rs)), 1286 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX) 1287 (vti.Vector (IMPLICIT_DEF)), 1288 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TU_MU)>; 1289 // Handle fneg with VFSGNJN using the same input for both operands. 1290 def : Pat<(fneg (vti.Vector vti.RegClass:$rs)), 1291 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX) 1292 (vti.Vector (IMPLICIT_DEF)), 1293 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TU_MU)>; 1294 1295 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 1296 (vti.Vector vti.RegClass:$rs2))), 1297 (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX) 1298 (vti.Vector (IMPLICIT_DEF)), 1299 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; 1300 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 1301 (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))), 1302 (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 1303 (vti.Vector (IMPLICIT_DEF)), 1304 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; 1305 1306 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 1307 (vti.Vector (fneg vti.RegClass:$rs2)))), 1308 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX) 1309 (vti.Vector (IMPLICIT_DEF)), 1310 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; 1311 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 1312 (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))), 1313 (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 1314 (vti.Vector (IMPLICIT_DEF)), 1315 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; 1316 } 1317} 1318 1319// 13.11. 
Vector Floating-Point MIN/MAX Instructions 1320defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">; 1321defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">; 1322 1323// 13.13. Vector Floating-Point Compare Instructions 1324defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 1325defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 1326 1327defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">; 1328defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">; 1329 1330defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">; 1331defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">; 1332 1333defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">; 1334defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">; 1335 1336// Floating-point vselects: 1337// 11.15. Vector Integer Merge Instructions 1338// 13.15. Vector Floating-Point Merge Instruction 1339foreach fvti = AllFloatVectors in { 1340 let Predicates = GetVTypePredicates<fvti>.Predicates in { 1341 def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1, 1342 fvti.RegClass:$rs2)), 1343 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 1344 (fvti.Vector (IMPLICIT_DEF)), 1345 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0), 1346 fvti.AVL, fvti.Log2SEW)>; 1347 1348 def : Pat<(fvti.Vector (vselect (fvti.Mask V0), 1349 (SplatFPOp fvti.ScalarRegClass:$rs1), 1350 fvti.RegClass:$rs2)), 1351 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 1352 (fvti.Vector (IMPLICIT_DEF)), 1353 fvti.RegClass:$rs2, 1354 (fvti.Scalar fvti.ScalarRegClass:$rs1), 1355 (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>; 1356 1357 def : Pat<(fvti.Vector (vselect (fvti.Mask V0), 1358 (SplatFPOp (fvti.Scalar fpimm0)), 1359 fvti.RegClass:$rs2)), 1360 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 1361 (fvti.Vector (IMPLICIT_DEF)), 1362 fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>; 1363 } 1364} 1365 1366// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions 1367defm : VPatConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">; 1368defm : VPatConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">; 1369defm : VPatConvertI2FPSDNode_V_RM<any_sint_to_fp, "PseudoVFCVT_F_X_V">; 1370defm : VPatConvertI2FPSDNode_V_RM<any_uint_to_fp, "PseudoVFCVT_F_XU_V">; 1371 1372// 13.18. Widening Floating-Point/Integer Type-Convert Instructions 1373defm : VPatWConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">; 1374defm : VPatWConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">; 1375defm : VPatWConvertI2FPSDNode_V<any_sint_to_fp, "PseudoVFWCVT_F_X_V">; 1376defm : VPatWConvertI2FPSDNode_V<any_uint_to_fp, "PseudoVFWCVT_F_XU_V">; 1377 1378// 13.19. 
Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2ISDNode_W<any_fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2ISDNode_W<any_fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_sint_to_fp, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                               GetVTypePredicates<fwti>.Predicates) in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                (fvti.Vector (IMPLICIT_DEF)),
                fwti.RegClass:$rs1,
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
                fvti.AVL, fvti.Log2SEW, TU_MU)>;
}

//===----------------------------------------------------------------------===//
// Vector Splats
//===----------------------------------------------------------------------===//

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)),
              (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  (fvti.Scalar fvti.ScalarRegClass:$rs1),
                  fvti.AVL, fvti.Log2SEW, TU_MU)>;

    def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
              (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  0, fvti.AVL, fvti.Log2SEW, TU_MU)>;
  }
}

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
foreach vti = AllFloatVectors in {
  defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                      vti.ScalarSuffix,
                                                      "_S_", vti.LMul.MX));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}
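
// Illustrative sketch (not an actual pattern definition; for exposition only):
// instantiating VPatBinarySDNode_VV above with vop = add and
// instruction_name = "PseudoVADD" for an SEW=32, LMUL=1 integer type yields a
// pattern of roughly this shape, where the pseudo name is formed by the
// string concatenation in that class:
//
//   def : Pat<(vti.Vector (add vti.RegClass:$rs1, vti.RegClass:$rs2)),
//             (PseudoVADD_VV_M1 (vti.Vector (IMPLICIT_DEF)),
//              vti.RegClass:$rs1, vti.RegClass:$rs2,
//              vti.AVL, /*log2sew=*/5, TA_MA)>;
//
// i.e. the generic SDNode is selected to the unmasked pseudo with an undef
// passthru and a tail/mask-agnostic policy. The _RM helper variants
// additionally pass FRM_DYN so that RISCVInsertReadWriteCSR knows not to
// change the dynamic rounding mode.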