//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td.
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//

def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;

multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)> {
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
                        log2sew, TA_MA)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}

multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg reg_class,
                                        int sew = !shl(1, log2sew)> {
  defvar load_instr =
    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
  defvar store_instr =
    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");

  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1)>;
}

multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m> {
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load GPR:$rs1)),
            (load_instr (m.Mask (IMPLICIT_DEF)), GPR:$rs1, m.AVL,
                        m.Log2SEW, TA_MA)>;
  // Store
  def : Pat<(store m.Mask:$rs2, GPR:$rs1),
            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
}

class VPatBinarySDNode_VV<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg op_reg_class,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             avl, log2sew, TA_MA)>;

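// Illustrative note on the class above: instantiating VPatBinarySDNode_VV with
// add and "PseudoVADD" for an LMUL=1 type, for example, selects
// PseudoVADD_VV_M1 with an IMPLICIT_DEF passthru, the type's AVL, its log2 SEW
// and the tail/mask-agnostic (TA_MA) policy.
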
class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType op_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg op_reg_class,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_XI<SDPatternOperator vop,
                          string instruction_name,
                          string suffix,
                          ValueType result_type,
                          ValueType vop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          ComplexPattern SplatPatKind,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#_#suffix#_# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             xop_kind:$rs2,
             avl, log2sew, TA_MA)>;

multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
                                  list<VTypeInfo> vtilist = AllIntegerVectors,
                                  bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass,
                                SplatPat, GPR, isSEWAware>;
    }
  }
}

multiclass VPatBinarySDNode_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                     Operand ImmType = simm5>
    : VPatBinarySDNode_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              !cast<ComplexPattern>(SplatPat#_#ImmType),
                              ImmType>;
  }
}

class VPatBinarySDNode_VF<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType xop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_VF_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType vop_type,
                             ValueType xop_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg vop_reg_class,
                             DAGOperand xop_kind,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

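// A note on the isSEWAware parameter used throughout these helpers: when it is
// set, the selected pseudo name gains an "_E<sew>" suffix, e.g. "PseudoVDIVU"
// with an LMUL=1, SEW=8 type resolves to PseudoVDIVU_VV_M1_E8 (see the
// divide/remainder and vfdiv/vfrdiv instantiations further down).
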
multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name,
                                    bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                vti.Vector, vti.Vector, vti.Scalar,
                                vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_VV_VF_RM<SDPatternOperator vop, string instruction_name,
                                       bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV_RM<vop, instruction_name,
                                   vti.Vector, vti.Vector, vti.Log2SEW,
                                   vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                   vti.Vector, vti.Vector, vti.Scalar,
                                   vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                   vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_name,
                                   bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                 !if(isSEWAware,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatBinaryFPSDNode_R_VF_RM<SDPatternOperator vop, string instruction_name,
                                      bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                 !if(isSEWAware,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
                                     CondCode cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector vti.RegClass:$rs2), cc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VV_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_VV<instruction_name, cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

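// The *_Swappable helpers above and below additionally match the setcc with its
// operands in the opposite order under the inverse condition code, so e.g. a
// (setcc x, y, SETGT) can still be selected as vmslt with x and y exchanged.
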
multiclass VPatIntegerSetCCSDNode_XI<
    string instruction_name,
    CondCode cc,
    string kind,
    ComplexPattern SplatPatKind,
    DAGOperand xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc,
                                               string kind,
                                               ComplexPattern SplatPatKind,
                                               DAGOperand xop_kind>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, kind, SplatPatKind,
                                xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                                 (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
      def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))),
                                 (vti.Vector vti.RegClass:$rs1), invcc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
    }
  }
}

multiclass VPatIntegerSetCCSDNode_VX_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VX",
                                          SplatPat, GPR>;

multiclass VPatIntegerSetCCSDNode_VI<string instruction_name, CondCode cc>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, "VI", SplatPat_simm5, simm5>;

multiclass VPatIntegerSetCCSDNode_VIPlus1<string instruction_name, CondCode cc,
                                          ComplexPattern splatpat_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (splatpat_kind simm5:$rs2)),
                               cc)),
              (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                           vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
                                      string inst_name,
                                      string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<fvti>.Predicates in {
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (fvti.Vector fvti.RegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  (fvti.Vector fvti.RegClass:$rs1),
                                  cc)),
                (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   fvti.AVL, fvti.Log2SEW)>;
    }
  }
}

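// In the FP compare helper above, a scalar splat appearing as the first setcc
// operand is selected with swapped_op_inst_name; e.g. the SETLT instantiation
// below pairs PseudoVMFLT with PseudoVMFGT for the reversed form.
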
multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
                              list<VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    foreach op = ops in
      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                   GetVTypePredicates<fti>.Predicates) in
      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertI2FPSDNode_V_RM<SDPatternOperator vop,
                                      string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 ivti.RegClass:$rs1,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertFP2ISDNode_V<SDPatternOperator vop,
                                   string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                 (ivti.Vector (IMPLICIT_DEF)),
                 fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertI2FPSDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                 (fwti.Vector (IMPLICIT_DEF)),
                 ivti.RegClass:$rs1,
                 ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertFP2ISDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 (iwti.Vector (IMPLICIT_DEF)),
                 fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatNConvertI2FPSDNode_W_RM<SDPatternOperator vop,
                                       string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 iwti.RegClass:$rs1,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

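// The single-width and narrowing integer-to-FP helpers are _RM variants: they
// pass FRM_DYN so RISCVInsertReadWriteCSR leaves the dynamic rounding mode
// untouched. The FP-to-integer helpers take no rounding-mode operand; the
// instantiations later in this file use the round-towards-zero (RTZ) pseudos.
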
multiclass VPatNConvertFP2ISDNode_W<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                   wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop,
                                             string instruction_name>
    : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>,
      VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>;

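// In the _WV form above, the wide operand is reused as the destination via the
// "_TIED" pseudo with TAIL_AGNOSTIC, whereas the _VV/_VX/_WX forms start from
// an IMPLICIT_DEF passthru.
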
multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
         wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
         vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}
multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
         wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
         vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs1),
                                 (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                 (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs1),
                                 (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.RegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector (SplatFPOp (vti.Scalar vti.ScalarRegClass:$rs1))),
                                 (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.ScalarRegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.ScalarRegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs1),
                                 (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                   wti.RegClass:$rs2, vti.RegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                 (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                   vti.ScalarRegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                   vti.ScalarRegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<SDNode op,
                                                  string instruction_name>
    : VPatWidenBinaryFPSDNode_VV_VF_RM<op, instruction_name>,
      VPatWidenBinaryFPSDNode_WV_WF_RM<op, instruction_name>;

multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                  (vti.Vector vti.RegClass:$rs1),
                                  (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                                  (vti.Vector vti.RegClass:$rs2),
                                  (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp
                                  (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                                  (vti.Vector vti.RegClass:$rs2),
                                  (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                        (vti.Vector vti.RegClass:$rs1),
                                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                  (vti.Vector vti.RegClass:$rs1),
                                  (vti.Mask true_mask), (XLenVT srcvalue))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                        (vti.Vector vti.RegClass:$rs1),
                                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Mask true_mask), (XLenVT srcvalue)))),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // NOTE: We choose VMADD because it has the most commuting freedom. So it
      // works best with how TwoAddressInstructionPass tries commuting.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      // The choice of VMADD here is arbitrary; vmadd.vx and vmacc.vx are
      // equally commutable.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

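// Section numbers in the comments below refer to the corresponding sections of
// the RISC-V 'V' extension specification, version 1.0.
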
// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
                          FractionalGroupFloatVectors,
                          FractionalGroupBFloatVectors) in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VBF16M1, VF16M1, VF32M1, VF64M1] in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors, GroupBFloatVectors) in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach mti = AllMasks in
  let Predicates = [HasVInstructions] in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands.
foreach vti = AllIntegerVectors in {
  // FIXME: The AddedComplexity here is covering up a missing matcher for
  // widening vwsub.vx which can recognize an extend folded into the
  // scalar of the splat.
  let AddedComplexity = 20 in
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                 simm5:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.2. Vector Widening Integer Add and Subtract
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;

defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;

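// For instance, the sext_oneuse instantiations above let a wide add of two
// sign-extended narrower vectors select vwadd.vv (PseudoVWADD_VV_*), while the
// zext/anyext forms map to vwaddu.vv.
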
// shl (ext v, splat 1) is a special case of widening add.
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                 (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
                          AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
                          AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
                 (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
               (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
               vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;

}

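// The add-for-shift rewrite above feeds $rs1 into both source operands, i.e. a
// shift-left by one becomes vadd.vv vd, vs, vs.
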
// 11.8. Vector Integer Comparison Instructions
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>;

defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;

defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;
// There is no VMSGE(U)_VX instruction

defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSNE", SETNE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLE", SETLE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLEU", SETULE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGT", SETGT>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGTU", SETUGT>;

defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLE", SETLT,
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLEU", SETULT,
                                      SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGT", SETGE,
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGTU", SETUGE,
                                      SplatPat_simm5_plus1_nonzero>;

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">;

defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", IntegerVectorsExceptI64>;

let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", I64IntegerVectors>;
}

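// vmulh/vmulhu patterns for SEW=64 element types are guarded above by
// HasVInstructionsFullMultiply, so they are only selected when full 64-bit
// multiply support is available.
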
// 11.11. Vector Integer Divide Instructions
defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM", isSEWAware=1>;

foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<
      (vti.Vector
       (riscv_trunc_vector_vl
        (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
              (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2)))),
        (vti.Mask true_mask), (XLenVT srcvalue))),
      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
         (vti.Vector (IMPLICIT_DEF)),
         vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                   "PseudoVWMUL">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse,
                                   "PseudoVWMULSU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse,
                                   "PseudoVWMULSU">;

// 11.13. Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddSDNode_VV_VX<add, "PseudoVMADD">;
defm : VPatMultiplyAddSDNode_VV_VX<sub, "PseudoVNMSUB">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                 vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
  }
}

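// Note the operand order in the vmerge patterns above: the vselect false
// operand ($rs2) becomes the first source and the true operand ($rs1) the
// second, with the mask taken from V0.
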
// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">;
defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">;
defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;

// 12.2. Vector Single-Width Averaging Add and Subtract
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(avgflooru (vti.Vector vti.RegClass:$rs1),
                         (vti.Vector vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs2,
                 0b10, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(avgflooru (vti.Vector vti.RegClass:$rs1),
                         (vti.Vector (SplatPat (XLenVT GPR:$rs2)))),
              (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                 0b10, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 15. Vector Mask Instructions

// 15.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
    def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
  }
}

// 13. Vector Floating-Point Instructions

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fadd, "PseudoVFADD">;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fsub, "PseudoVFSUB">;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fsub, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fadd, "PseudoVFWADD">;
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fsub, "PseudoVFWSUB">;

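// The reversed-operand pseudos (vfrsub above, vfrdiv below) come from the
// _R_VF multiclasses, which match the scalar splat as the first operand of the
// node.
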
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fmul, "PseudoVFMUL">;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fdiv, "PseudoVFDIV", isSEWAware=1>;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fdiv, "PseudoVFRDIV", isSEWAware=1>;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_RM<fmul, "PseudoVFWMUL">;

// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX;
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The choice of VFMADD here is arbitrary; vfmadd.vf and vfmacc.vf are
    // equally commutable.
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The splat might be negated.
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC">;
defm : VPatWidenFPNegMulAccSDNode_VV_VF_RM<"PseudoVFWNMACC">;
defm : VPatWidenFPMulSacSDNode_VV_VF_RM<"PseudoVFWMSAC">;
defm : VPatWidenFPNegMulSacSDNode_VV_VF_RM<"PseudoVFWNMSAC">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 vti.AVL, vti.Log2SEW, TA_MA)>;

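    // Like the vfdiv/vfrdiv instantiations above, vfsqrt selects the SEW-aware
    // pseudo name, e.g. PseudoVFSQRT_V_M1_E32 for an LMUL=1, SEW=32 type.
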
    // 13.12. Vector Floating-Point Sign-Injection Instructions
    def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector vti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg vti.RegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
              (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">;
defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

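// Each FP condition code above is matched twice: once for the plain form and
// once for the ordered/unordered variant that the vmf* compare implements
// directly (e.g. SETOEQ alongside SETEQ, SETUNE alongside SETNE).
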
// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
foreach fvti = AllFloatVectors in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                 fvti.AVL, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                    (SplatFPOp (fvti.Scalar fpimm0)),
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;

  }
  let Predicates = GetVTypePredicates<fvti>.Predicates in
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp fvti.ScalarRegClass:$rs1),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)),
               fvti.RegClass:$rs2,
               (fvti.Scalar fvti.ScalarRegClass:$rs1),
               (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
defm : VPatConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_sint_to_fp, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_uint_to_fp, "PseudoVFCVT_F_XU_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
defm : VPatWConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertI2FPSDNode_V<any_sint_to_fp, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FPSDNode_V<any_uint_to_fp, "PseudoVFWCVT_F_XU_V">;

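// The widening integer-to-FP conversions above need no rounding-mode operand
// because every SEW-wide integer value is exactly representable in the 2*SEW
// floating-point result; the single-width and narrowing forms go through the
// _RM helpers and pass FRM_DYN instead.
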
// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2ISDNode_W<any_fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2ISDNode_W<any_fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_sint_to_fp, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)),
               fwti.RegClass:$rs1,
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
               FRM_DYN,
               fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Splats
//===----------------------------------------------------------------------===//

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl undef, fvti.ScalarRegClass:$rs1, srcvalue)),
            (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)),
               (fvti.Scalar fvti.ScalarRegClass:$rs1),
               fvti.AVL, fvti.Log2SEW, TA_MA)>;
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in
  def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
               (fvti.Vector (IMPLICIT_DEF)),
               0, fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
foreach vti = AllFloatVectors in {
  defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                      vti.ScalarSuffix,
                                                      "_S_", vti.LMul.MX));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}