//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//

def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;

multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)> {
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
                        log2sew, TA_MA)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}

multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg reg_class,
                                        int sew = !shl(1, log2sew)> {
  defvar load_instr =
    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
  defvar store_instr =
    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");

  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1)>;
}

multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m> {
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load GPR:$rs1)),
            (load_instr (m.Mask (IMPLICIT_DEF)), GPR:$rs1, m.AVL,
                        m.Log2SEW, TA_MA)>;
  // Store
  def : Pat<(store m.Mask:$rs2, GPR:$rs1),
            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
}

class VPatBinarySDNode_VV<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg op_reg_class,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             avl, log2sew, TA_MA)>;

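// The "_RM" pattern classes below additionally pass an explicit static
// rounding-mode operand to the pseudo. FRM_DYN selects the dynamic rounding
// mode held in the frm CSR and tells RISCVInsertReadWriteCSR that no rounding
// mode change is needed.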
class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType op_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg op_reg_class,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_VV_"# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_XI<SDPatternOperator vop,
                          string instruction_name,
                          string suffix,
                          ValueType result_type,
                          ValueType vop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          ComplexPattern SplatPatKind,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#_#suffix#_# vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             xop_kind:$rs2,
             avl, log2sew, TA_MA)>;

multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
                                  list<VTypeInfo> vtilist = AllIntegerVectors,
                                  bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass,
                                SplatPat, GPR, isSEWAware>;
    }
  }
}

multiclass VPatBinarySDNode_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                     Operand ImmType = simm5>
    : VPatBinarySDNode_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              !cast<ComplexPattern>(SplatPat#_#ImmType),
                              ImmType>;
  }
}

class VPatBinarySDNode_VF<SDPatternOperator vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType xop_type,
                          int log2sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          DAGOperand xop_kind,
                          bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             avl, log2sew, TA_MA)>;

class VPatBinarySDNode_VF_RM<SDPatternOperator vop,
                             string instruction_name,
                             ValueType result_type,
                             ValueType vop_type,
                             ValueType xop_type,
                             int log2sew,
                             LMULInfo vlmul,
                             OutPatFrag avl,
                             VReg vop_reg_class,
                             DAGOperand xop_kind,
                             bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(
           !if(isSEWAware,
               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
               instruction_name#"_"#vlmul.MX))
             (result_type (IMPLICIT_DEF)),
             vop_reg_class:$rs1,
             (xop_type xop_kind:$rs2),
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             avl, log2sew, TA_MA)>;

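// FP binary helpers: instantiate both the vector-vector (VV) and
// vector-scalar (VF) forms for every float vector type. When isSEWAware is
// set, the selected pseudo name carries an "_E<sew>" suffix encoding the
// element width.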
multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name,
                                    bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV<vop, instruction_name,
                                vti.Vector, vti.Vector, vti.Log2SEW,
                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                vti.Vector, vti.Vector, vti.Scalar,
                                vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_VV_VF_RM<SDPatternOperator vop, string instruction_name,
                                       bit isSEWAware = 0> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinarySDNode_VV_RM<vop, instruction_name,
                                   vti.Vector, vti.Vector, vti.Log2SEW,
                                   vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
      def : VPatBinarySDNode_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
                                   vti.Vector, vti.Vector, vti.Scalar,
                                   vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                                   vti.ScalarRegClass, isSEWAware>;
    }
  }
}

multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_name,
                                   bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                 !if(isSEWAware,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatBinaryFPSDNode_R_VF_RM<SDPatternOperator vop, string instruction_name,
                                      bit isSEWAware = 0> {
  foreach fvti = AllFloatVectors in
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(
                 !if(isSEWAware,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs1,
                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
                                     CondCode cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector vti.RegClass:$rs2), cc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VV_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_VV<instruction_name, cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

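// Integer setcc helpers. The "_XI" forms compare a vector against a splatted
// scalar or immediate; the "Swappable" variants also match the operands in
// the opposite order by using the inverse condition code, so the same
// vector/scalar compare instruction can be reused.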
multiclass VPatIntegerSetCCSDNode_XI<
    string instruction_name,
    CondCode cc,
    string kind,
    ComplexPattern SplatPatKind,
    DAGOperand xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc,
                                               string kind,
                                               ComplexPattern SplatPatKind,
                                               DAGOperand xop_kind>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, kind, SplatPatKind,
                                xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                                 (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))), cc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
      def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind (XLenVT xop_kind:$rs2))),
                                 (vti.Vector vti.RegClass:$rs1), invcc)),
                (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
    }
  }
}

multiclass VPatIntegerSetCCSDNode_VX_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VX",
                                          SplatPat, GPR>;

multiclass VPatIntegerSetCCSDNode_VI<string instruction_name, CondCode cc>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, "VI", SplatPat_simm5, simm5>;

multiclass VPatIntegerSetCCSDNode_VIPlus1<string instruction_name, CondCode cc,
                                          ComplexPattern splatpat_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (splatpat_kind simm5:$rs2)),
                               cc)),
              (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                           vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
                                      string inst_name,
                                      string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<fvti>.Predicates in {
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (fvti.Vector fvti.RegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                  (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  cc)),
                (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                    fvti.AVL, fvti.Log2SEW)>;
      def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2),
                                  (fvti.Vector fvti.RegClass:$rs1),
                                  cc)),
                (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                    fvti.AVL, fvti.Log2SEW)>;
    }
  }
}

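// Extension and conversion helpers. VPatExtendSDNode_V maps sext/zext/anyext
// from a fractional-LMUL source type onto the vsext/vzext pseudos; the
// conversion multiclasses below pair each float type with its same-size (V),
// double-width (WConvert), or half-width (NConvert) integer counterpart.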
multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
                              list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    foreach op = ops in
      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                   GetVTypePredicates<fti>.Predicates) in
      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertI2FPSDNode_V_RM<SDPatternOperator vop,
                                      string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  ivti.RegClass:$rs1,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatConvertFP2ISDNode_V<SDPatternOperator vop,
                                   string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  (ivti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertI2FPSDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  (fwti.Vector (IMPLICIT_DEF)),
                  ivti.RegClass:$rs1,
                  ivti.AVL, ivti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWConvertFP2ISDNode_V<SDPatternOperator vop,
                                    string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  (iwti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

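// Narrowing conversions take a wide (W) source operand and produce the
// narrow result type; the int-to-FP form again carries the FRM_DYN
// rounding-mode operand.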
multiclass VPatNConvertI2FPSDNode_W_RM<SDPatternOperator vop,
                                       string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<iwti>.Predicates) in
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  iwti.RegClass:$rs1,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatNConvertFP2ISDNode_W<SDPatternOperator vop,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                    (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                    wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
                (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop,
                                             string instruction_name>
    : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>,
      VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>;

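// Widening multiply-add: match add(rd, mul_oneuse(ext(rs1), ext(rs2))) and
// fold the one-use extends and the multiply into a single vwmacc-family
// pseudo that ties the accumulator register.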
multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
          wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
          vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}
multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat (XLenVT GPR:$rs1))))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
          wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
          vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs1),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs1),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector (SplatFPOp (vti.Scalar vti.ScalarRegClass:$rs1))),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs2),
                                     (vti.Mask true_mask), (XLenVT srcvalue))),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector vti.RegClass:$rs1),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                    wti.RegClass:$rs2, vti.RegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                     (vti.Mask true_mask), (XLenVT srcvalue)))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                    (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2,
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<SDNode op,
                                                  string instruction_name>
    : VPatWidenBinaryFPSDNode_VV_VF_RM<op, instruction_name>,
      VPatWidenBinaryFPSDNode_WV_WF_RM<op, instruction_name>;

multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs1),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs2),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp
                                      (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs2),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (wti.Vector wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs1),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs2),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs1),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     (fneg wti.RegClass:$rd)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs1),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                     (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                            (vti.Vector vti.RegClass:$rs2),
                                            (vti.Mask true_mask), (XLenVT srcvalue)))),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                     (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                               (vti.Mask true_mask), (XLenVT srcvalue)),
                     wti.RegClass:$rd),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // NOTE: We choose VMADD because it has the most commuting freedom. So it
      // works best with how TwoAddressInstructionPass tries commuting.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                    vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
      // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
      // commutable.
      def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                                (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))),
                (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                    vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

multiclass VPatAVGADD_VV_VX_RM<SDNode vop, int vxrm> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2)),
                (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs2,
                    vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector (SplatPat (XLenVT GPR:$rs2)))),
                (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                    vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
    }
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
                          FractionalGroupFloatVectors,
                          FractionalGroupBFloatVectors) in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VBF16M1, VF16M1, VF32M1, VF64M1] in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors, GroupBFloatVectors) in
  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                       GetVTypePredicates<vti>.Predicates) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach mti = AllMasks in
  let Predicates = [HasVInstructions] in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  // FIXME: The AddedComplexity here is covering up a missing matcher for
  // widening vwsub.vx which can recognize an extend folded into the
  // scalar of the splat.
  let AddedComplexity = 20 in
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                   (vti.Vector vti.RegClass:$rs1)),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                  simm5:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.2. Vector Widening Integer Add and Subtract
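// These patterns fold a one-use sign-, zero-, or any-extend of the narrow
// operands into vwadd[u]/vwsub[u]; any-extends use the unsigned forms.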
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;

defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;

// shl (ext v, splat 1) is a special case of widening add.
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(shl (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))),
                   (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
                  vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
                          AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
                          AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
                 (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;

}

// 11.8. Vector Integer Comparison Instructions
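// Besides the direct VV/VX/VI forms, the VIPlus1 helpers match x < imm (and
// the unsigned and >= variants) when imm-1 still fits in simm5, selecting the
// corresponding <= or > compare with the immediate decremented via DecImm.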
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>;

defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;

defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;
// There is no VMSGE(U)_VX instruction

defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSNE", SETNE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLE", SETLE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLEU", SETULE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGT", SETGT>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGTU", SETUGT>;

defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLE", SETLT,
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLEU", SETULT,
                                      SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGT", SETGE,
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGTU", SETUGE,
                                      SplatPat_simm5_plus1_nonzero>;

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">;

defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", IntegerVectorsExceptI64>;

let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU", I64IntegerVectors>;
}

// 11.11. Vector Integer Divide Instructions
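// Division and remainder use SEW-aware pseudos (isSEWAware=1), so the
// selected pseudo name carries an "_E<sew>" suffix, presumably so scheduling
// information can depend on the element width.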
defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM", isSEWAware=1>;

foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
                (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2)))),
          (vti.Mask true_mask), (XLenVT srcvalue))),
      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
          (vti.Vector (IMPLICIT_DEF)),
          vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                   "PseudoVWMUL">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse,
                                   "PseudoVWMULSU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse,
                                   "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions.
defm : VPatMultiplyAddSDNode_VV_VX<add, "PseudoVMADD">;
defm : VPatMultiplyAddSDNode_VV_VX<sub, "PseudoVNMSUB">;

// 11.14 Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                  vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;

    def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
  }
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
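// Saturating add/subtract map directly onto vsadd[u]/vssub[u]. The averaging
// patterns in 12.2 below select vaaddu with a statically encoded vxrm operand
// (0b10, round-down, for avgflooru; 0b00, round-to-nearest-up, for avgceilu).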
defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">;
defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">;
defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;

// 12.2. Vector Single-Width Averaging Add and Subtract
defm : VPatAVGADD_VV_VX_RM<avgflooru, 0b10>;
defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00>;

// 15. Vector Mask Instructions

// 15.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
              (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
    def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
              (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
                  VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

    // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
    def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                  VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
  }
}

// 13. Vector Floating-Point Instructions

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fadd, "PseudoVFADD">;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fsub, "PseudoVFSUB">;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fsub, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fadd, "PseudoVFWADD">;
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<fsub, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fmul, "PseudoVFMUL">;
defm : VPatBinaryFPSDNode_VV_VF_RM<any_fdiv, "PseudoVFDIV", isSEWAware=1>;
defm : VPatBinaryFPSDNode_R_VF_RM<any_fdiv, "PseudoVFRDIV", isSEWAware=1>;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_RM<fmul, "PseudoVFWMUL">;

// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX;
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                  fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
    // commutable.
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                    (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

    // The splat might be negated.
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                  fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC">;
defm : VPatWidenFPNegMulAccSDNode_VV_VF_RM<"PseudoVFWNMACC">;
defm : VPatWidenFPMulSacSDNode_VV_VF_RM<"PseudoVFWMSAC">;
defm : VPatWidenFPNegMulSacSDNode_VV_VF_RM<"PseudoVFWNMSAC">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs2,
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  vti.AVL, vti.Log2SEW, TA_MA)>;

    // 13.12. Vector Floating-Point Sign-Injection Instructions
    def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector vti.RegClass:$rs2))),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;

    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg vti.RegClass:$rs2)))),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                     (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
              (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
                  vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
}

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">;
defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
foreach fvti = AllFloatVectors in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                  fvti.AVL, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                    (SplatFPOp (fvti.Scalar fpimm0)),
                                    fvti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                  (fvti.Vector (IMPLICIT_DEF)),
                  fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;

  }
  let Predicates = GetVTypePredicates<fvti>.Predicates in
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp fvti.ScalarRegClass:$rs1),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                (fvti.Vector (IMPLICIT_DEF)),
                fvti.RegClass:$rs2,
                (fvti.Scalar fvti.ScalarRegClass:$rs1),
                (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
defm : VPatConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_sint_to_fp, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FPSDNode_V_RM<any_uint_to_fp, "PseudoVFCVT_F_XU_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
defm : VPatWConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertI2FPSDNode_V<any_sint_to_fp, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FPSDNode_V<any_uint_to_fp, "PseudoVFWCVT_F_XU_V">;

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
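// Besides the fcvt patterns, fpround of the double-width float type selects
// vfncvt.f.f.w with the FRM_DYN rounding-mode operand; for f16 results the
// pattern only requires HasVInstructionsF16Minimal.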
defm : VPatNConvertFP2ISDNode_W<any_fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2ISDNode_W<any_fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_sint_to_fp, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPSDNode_W_RM<any_uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                (fvti.Vector (IMPLICIT_DEF)),
                fwti.RegClass:$rs1,
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
                fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Splats
//===----------------------------------------------------------------------===//

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl undef, fvti.ScalarRegClass:$rs1, srcvalue)),
            (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                (fvti.Vector (IMPLICIT_DEF)),
                (fvti.Scalar fvti.ScalarRegClass:$rs1),
                fvti.AVL, fvti.Log2SEW, TA_MA)>;
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in
  def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                (fvti.Vector (IMPLICIT_DEF)),
                0, fvti.AVL, fvti.Log2SEW, TA_MA)>;
}

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
foreach vti = AllFloatVectors in {
  defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                      vti.ScalarSuffix,
                                                      "_S_", vti.LMul.MX));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}