1//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8/// 9/// This file contains the required infrastructure and SDNode patterns to 10/// support code generation for the standard 'V' (Vector) extension, version 11/// 0.10. This version is still experimental as the 'V' extension hasn't been 12/// ratified yet. 13/// 14/// This file is included from and depends upon RISCVInstrInfoVPseudos.td 15/// 16/// Note: the patterns for RVV intrinsics are found in 17/// RISCVInstrInfoVPseudos.td. 18/// 19//===----------------------------------------------------------------------===// 20 21//===----------------------------------------------------------------------===// 22// Helpers to define the SDNode patterns. 23//===----------------------------------------------------------------------===// 24 25def SDTSplatI64 : SDTypeProfile<1, 1, [ 26 SDTCVecEltisVT<0, i64>, SDTCisVT<1, i32> 27]>; 28 29def rv32_splat_i64 : SDNode<"RISCVISD::SPLAT_VECTOR_I64", SDTSplatI64>; 30 31def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, 32 SDTCisVT<1, XLenVT>]>; 33def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>; 34def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>; 35 36def rvv_vnot : PatFrag<(ops node:$in), 37 (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>; 38 39// Give explicit Complexity to prefer simm5/uimm5. 40def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [splat_vector, rv32_splat_i64], [], 1>; 41def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [splat_vector, rv32_splat_i64], [], 2>; 42def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [splat_vector, rv32_splat_i64], [], 2>; 43def SplatPat_simm5_plus1 44 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", 45 [splat_vector, rv32_splat_i64], [], 2>; 46def SplatPat_simm5_plus1_nonzero 47 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", 48 [splat_vector, rv32_splat_i64], [], 2>; 49 50class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> { 51 dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix); 52} 53 54multiclass VPatUSLoadStoreSDNode<ValueType type, 55 int log2sew, 56 LMULInfo vlmul, 57 OutPatFrag avl, 58 VReg reg_class, 59 int sew = !shl(1, log2sew)> 60{ 61 defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX); 62 defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX); 63 // Load 64 def : Pat<(type (load BaseAddr:$rs1)), 65 (load_instr BaseAddr:$rs1, avl, log2sew)>; 66 // Store 67 def : Pat<(store type:$rs2, BaseAddr:$rs1), 68 (store_instr reg_class:$rs2, BaseAddr:$rs1, avl, log2sew)>; 69} 70 71multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type, 72 int log2sew, 73 LMULInfo vlmul, 74 VReg reg_class, 75 int sew = !shl(1, log2sew)> 76{ 77 defvar load_instr = 78 !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V"); 79 defvar store_instr = 80 !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V"); 81 82 // Load 83 def : Pat<(type (load BaseAddr:$rs1)), 84 (load_instr BaseAddr:$rs1)>; 85 // Store 86 def : Pat<(store type:$rs2, BaseAddr:$rs1), 87 (store_instr reg_class:$rs2, BaseAddr:$rs1)>; 88} 89 90multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m> 91{ 92 defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#m.BX); 93 defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#m.BX); 94 // Load 95 def : Pat<(m.Mask (load BaseAddr:$rs1)), 96 (load_instr BaseAddr:$rs1, m.AVL, m.Log2SEW)>; 97 // Store 98 def : Pat<(store m.Mask:$rs2, BaseAddr:$rs1), 99 (store_instr VR:$rs2, BaseAddr:$rs1, m.AVL, m.Log2SEW)>; 100} 101 102class VPatBinarySDNode_VV<SDNode vop, 103 string instruction_name, 104 ValueType result_type, 105 ValueType op_type, 106 ValueType mask_type, 107 int sew, 108 LMULInfo vlmul, 109 OutPatFrag avl, 110 VReg RetClass, 111 VReg op_reg_class> : 112 Pat<(result_type (vop 113 (op_type op_reg_class:$rs1), 114 (op_type op_reg_class:$rs2))), 115 (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX) 116 op_reg_class:$rs1, 117 op_reg_class:$rs2, 118 avl, sew)>; 119 120class VPatBinarySDNode_XI<SDNode vop, 121 string instruction_name, 122 string suffix, 123 ValueType result_type, 124 ValueType vop_type, 125 ValueType mask_type, 126 int sew, 127 LMULInfo vlmul, 128 OutPatFrag avl, 129 VReg RetClass, 130 VReg vop_reg_class, 131 ComplexPattern SplatPatKind, 132 DAGOperand xop_kind> : 133 Pat<(result_type (vop 134 (vop_type vop_reg_class:$rs1), 135 (vop_type (SplatPatKind xop_kind:$rs2)))), 136 (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX) 137 vop_reg_class:$rs1, 138 xop_kind:$rs2, 139 avl, sew)>; 140 141multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name> { 142 foreach vti = AllIntegerVectors in { 143 def : VPatBinarySDNode_VV<vop, instruction_name, 144 vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, 145 vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>; 146 def : VPatBinarySDNode_XI<vop, instruction_name, "VX", 147 vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, 148 vti.LMul, vti.AVL, vti.RegClass, vti.RegClass, 149 SplatPat, GPR>; 150 } 151} 152 153multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name, 154 Operand ImmType = simm5> 155 : VPatBinarySDNode_VV_VX<vop, instruction_name> { 156 foreach vti = AllIntegerVectors in { 157 def : VPatBinarySDNode_XI<vop, instruction_name, "VI", 158 vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, 159 vti.LMul, vti.AVL, vti.RegClass, vti.RegClass, 160 !cast<ComplexPattern>(SplatPat#_#ImmType), 161 ImmType>; 162 } 163} 164 165class VPatBinarySDNode_VF<SDNode vop, 166 string instruction_name, 167 ValueType result_type, 168 ValueType vop_type, 169 ValueType xop_type, 170 ValueType mask_type, 171 int sew, 172 LMULInfo vlmul, 173 OutPatFrag avl, 174 VReg RetClass, 175 VReg vop_reg_class, 176 DAGOperand xop_kind> : 177 Pat<(result_type (vop (vop_type vop_reg_class:$rs1), 178 (vop_type (splat_vector xop_kind:$rs2)))), 179 (!cast<Instruction>(instruction_name#"_"#vlmul.MX) 180 vop_reg_class:$rs1, 181 (xop_type xop_kind:$rs2), 182 avl, sew)>; 183 184multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> { 185 foreach vti = AllFloatVectors in { 186 def : VPatBinarySDNode_VV<vop, instruction_name, 187 vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, 188 vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>; 189 def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 190 vti.Vector, vti.Vector, vti.Scalar, vti.Mask, 191 vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass, vti.RegClass, 192 vti.ScalarRegClass>; 193 } 194} 195 196multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> { 197 foreach fvti = AllFloatVectors in 198 def : Pat<(fvti.Vector (vop (fvti.Vector (splat_vector fvti.Scalar:$rs2)), 199 (fvti.Vector fvti.RegClass:$rs1))), 200 (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 201 fvti.RegClass:$rs1, 202 (fvti.Scalar fvti.ScalarRegClass:$rs2), 203 fvti.AVL, fvti.Log2SEW)>; 204} 205 206multiclass VPatIntegerSetCCSDNode_VV<CondCode cc, 207 string instruction_name, 208 bit swap = 0> { 209 foreach vti = AllIntegerVectors in { 210 defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX); 211 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), 212 (vti.Vector vti.RegClass:$rs2), cc)), 213 SwapHelper<(instruction), 214 (instruction vti.RegClass:$rs1), 215 (instruction vti.RegClass:$rs2), 216 (instruction vti.AVL, vti.Log2SEW), 217 swap>.Value>; 218 } 219} 220 221multiclass VPatIntegerSetCCSDNode_XI<CondCode cc, 222 string instruction_name, 223 string kind, 224 ComplexPattern SplatPatKind, 225 DAGOperand xop_kind, 226 bit swap = 0> { 227 foreach vti = AllIntegerVectors in { 228 defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX); 229 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), 230 (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)), 231 SwapHelper<(instruction), 232 (instruction vti.RegClass:$rs1), 233 (instruction xop_kind:$rs2), 234 (instruction vti.AVL, vti.Log2SEW), 235 swap>.Value>; 236 } 237} 238 239multiclass VPatIntegerSetCCSDNode_VV_VX_VI<CondCode cc, 240 string instruction_name, 241 bit swap = 0> { 242 defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>; 243 defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX", 244 SplatPat, GPR, swap>; 245 defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI", 246 SplatPat_simm5, simm5, swap>; 247} 248 249multiclass VPatIntegerSetCCSDNode_VV_VX<CondCode cc, 250 string instruction_name, 251 bit swap = 0> { 252 defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>; 253 defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX", 254 SplatPat, GPR, swap>; 255} 256 257multiclass VPatIntegerSetCCSDNode_VX_VI<CondCode cc, 258 string instruction_name, 259 bit swap = 0> { 260 defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX", 261 SplatPat, GPR, swap>; 262 defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI", 263 SplatPat_simm5, simm5, swap>; 264} 265 266multiclass VPatIntegerSetCCSDNode_VIPlus1<CondCode cc, string instruction_name, 267 ComplexPattern splatpat_kind> { 268 foreach vti = AllIntegerVectors in { 269 defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX); 270 def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1), 271 (vti.Vector (splatpat_kind simm5:$rs2)), 272 cc)), 273 (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2), 274 vti.AVL, vti.Log2SEW)>; 275 } 276} 277 278multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc, 279 string inst_name, 280 string swapped_op_inst_name> { 281 foreach fvti = AllFloatVectors in { 282 def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1), 283 (fvti.Vector fvti.RegClass:$rs2), 284 cc)), 285 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX) 286 fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>; 287 def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1), 288 (splat_vector fvti.ScalarRegClass:$rs2), 289 cc)), 290 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 291 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 292 fvti.AVL, fvti.Log2SEW)>; 293 def : Pat<(fvti.Mask (setcc (splat_vector fvti.ScalarRegClass:$rs2), 294 (fvti.Vector fvti.RegClass:$rs1), 295 cc)), 296 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 297 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 298 fvti.AVL, fvti.Log2SEW)>; 299 } 300} 301 302multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix, 303 list <VTypeInfoToFraction> fraction_list> { 304 foreach vtiTofti = fraction_list in { 305 defvar vti = vtiTofti.Vti; 306 defvar fti = vtiTofti.Fti; 307 foreach op = ops in 308 def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))), 309 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX) 310 fti.RegClass:$rs2, fti.AVL, vti.Log2SEW)>; 311 } 312} 313 314multiclass VPatConvertI2FPSDNode_V<SDNode vop, string instruction_name> { 315 foreach fvti = AllFloatVectors in { 316 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 317 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))), 318 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX) 319 ivti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>; 320 } 321} 322 323multiclass VPatConvertFP2ISDNode_V<SDNode vop, string instruction_name> { 324 foreach fvti = AllFloatVectors in { 325 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 326 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))), 327 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX) 328 fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>; 329 } 330} 331 332multiclass VPatWConvertI2FPSDNode_V<SDNode vop, string instruction_name> { 333 foreach vtiToWti = AllWidenableIntToFloatVectors in { 334 defvar ivti = vtiToWti.Vti; 335 defvar fwti = vtiToWti.Wti; 336 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))), 337 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX) 338 ivti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>; 339 } 340} 341 342multiclass VPatWConvertFP2ISDNode_V<SDNode vop, string instruction_name> { 343 foreach fvtiToFWti = AllWidenableFloatVectors in { 344 defvar fvti = fvtiToFWti.Vti; 345 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 346 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))), 347 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX) 348 fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>; 349 } 350} 351 352multiclass VPatNConvertI2FPSDNode_V<SDNode vop, string instruction_name> { 353 foreach fvtiToFWti = AllWidenableFloatVectors in { 354 defvar fvti = fvtiToFWti.Vti; 355 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 356 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))), 357 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX) 358 iwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>; 359 } 360} 361 362multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> { 363 foreach vtiToWti = AllWidenableIntToFloatVectors in { 364 defvar vti = vtiToWti.Vti; 365 defvar fwti = vtiToWti.Wti; 366 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))), 367 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX) 368 fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; 369 } 370} 371 372//===----------------------------------------------------------------------===// 373// Patterns. 374//===----------------------------------------------------------------------===// 375 376let Predicates = [HasStdExtV] in { 377 378// 7.4. Vector Unit-Stride Instructions 379foreach vti = !listconcat(FractionalGroupIntegerVectors, 380 FractionalGroupFloatVectors) in 381 defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul, 382 vti.AVL, vti.RegClass>; 383foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in 384 defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul, 385 vti.RegClass>; 386foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in 387 defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul, 388 vti.RegClass>; 389foreach mti = AllMasks in 390 defm : VPatUSLoadStoreMaskSDNode<mti>; 391 392// 12.1. Vector Single-Width Integer Add and Subtract 393defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">; 394defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">; 395// Handle VRSUB specially since it's the only integer binary op with reversed 396// pattern operands 397foreach vti = AllIntegerVectors in { 398 def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)), 399 (vti.Vector vti.RegClass:$rs1)), 400 (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX) 401 vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>; 402 def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)), 403 (vti.Vector vti.RegClass:$rs1)), 404 (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX) 405 vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>; 406} 407 408// 12.3. Vector Integer Extension 409defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2", 410 AllFractionableVF2IntVectors>; 411defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2", 412 AllFractionableVF2IntVectors>; 413defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4", 414 AllFractionableVF4IntVectors>; 415defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4", 416 AllFractionableVF4IntVectors>; 417defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8", 418 AllFractionableVF8IntVectors>; 419defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8", 420 AllFractionableVF8IntVectors>; 421 422// 12.5. Vector Bitwise Logical Instructions 423defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">; 424defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">; 425defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">; 426 427// 12.6. Vector Single-Width Bit Shift Instructions 428defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>; 429defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>; 430defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>; 431 432foreach vti = AllIntegerVectors in { 433 // Emit shift by 1 as an add since it might be faster. 434 def : Pat<(shl (vti.Vector vti.RegClass:$rs1), 435 (vti.Vector (splat_vector (XLenVT 1)))), 436 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 437 vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; 438} 439foreach vti = [VI64M1, VI64M2, VI64M4, VI64M8] in { 440 def : Pat<(shl (vti.Vector vti.RegClass:$rs1), 441 (vti.Vector (rv32_splat_i64 (XLenVT 1)))), 442 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX) 443 vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; 444 445} 446 447// 12.8. Vector Integer Comparison Instructions 448defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETEQ, "PseudoVMSEQ">; 449defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETNE, "PseudoVMSNE">; 450 451defm : VPatIntegerSetCCSDNode_VV_VX<SETLT, "PseudoVMSLT">; 452defm : VPatIntegerSetCCSDNode_VV_VX<SETULT, "PseudoVMSLTU">; 453defm : VPatIntegerSetCCSDNode_VIPlus1<SETLT, "PseudoVMSLE", 454 SplatPat_simm5_plus1>; 455defm : VPatIntegerSetCCSDNode_VIPlus1<SETULT, "PseudoVMSLEU", 456 SplatPat_simm5_plus1_nonzero>; 457 458defm : VPatIntegerSetCCSDNode_VV<SETGT, "PseudoVMSLT", /*swap*/1>; 459defm : VPatIntegerSetCCSDNode_VV<SETUGT, "PseudoVMSLTU", /*swap*/1>; 460defm : VPatIntegerSetCCSDNode_VX_VI<SETGT, "PseudoVMSGT">; 461defm : VPatIntegerSetCCSDNode_VX_VI<SETUGT, "PseudoVMSGTU">; 462 463defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETLE, "PseudoVMSLE">; 464defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETULE, "PseudoVMSLEU">; 465 466defm : VPatIntegerSetCCSDNode_VV<SETGE, "PseudoVMSLE", /*swap*/1>; 467defm : VPatIntegerSetCCSDNode_VV<SETUGE, "PseudoVMSLEU", /*swap*/1>; 468defm : VPatIntegerSetCCSDNode_VIPlus1<SETGE, "PseudoVMSGT", 469 SplatPat_simm5_plus1>; 470defm : VPatIntegerSetCCSDNode_VIPlus1<SETUGE, "PseudoVMSGTU", 471 SplatPat_simm5_plus1_nonzero>; 472 473// 12.9. Vector Integer Min/Max Instructions 474defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">; 475defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">; 476defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">; 477defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">; 478 479// 12.10. Vector Single-Width Integer Multiply Instructions 480defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">; 481defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH">; 482defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU">; 483 484// 12.11. Vector Integer Divide Instructions 485defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU">; 486defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV">; 487defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU">; 488defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM">; 489 490// 12.13 Vector Single-Width Integer Multiply-Add Instructions. 491foreach vti = AllIntegerVectors in { 492 // NOTE: We choose VMADD because it has the most commuting freedom. So it 493 // works best with how TwoAddressInstructionPass tries commuting. 494 defvar suffix = vti.LMul.MX # "_COMMUTABLE"; 495 def : Pat<(vti.Vector (add vti.RegClass:$rs2, 496 (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))), 497 (!cast<Instruction>("PseudoVMADD_VV_"# suffix) 498 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 499 vti.AVL, vti.Log2SEW)>; 500 def : Pat<(vti.Vector (sub vti.RegClass:$rs2, 501 (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))), 502 (!cast<Instruction>("PseudoVNMSUB_VV_"# suffix) 503 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, 504 vti.AVL, vti.Log2SEW)>; 505 506 // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally 507 // commutable. 508 def : Pat<(vti.Vector (add vti.RegClass:$rs2, 509 (mul_oneuse (SplatPat XLenVT:$rs1), 510 vti.RegClass:$rd))), 511 (!cast<Instruction>("PseudoVMADD_VX_" # suffix) 512 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 513 vti.AVL, vti.Log2SEW)>; 514 def : Pat<(vti.Vector (sub vti.RegClass:$rs2, 515 (mul_oneuse (SplatPat XLenVT:$rs1), 516 vti.RegClass:$rd))), 517 (!cast<Instruction>("PseudoVNMSUB_VX_" # suffix) 518 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, 519 vti.AVL, vti.Log2SEW)>; 520} 521 522// 12.15. Vector Integer Merge Instructions 523foreach vti = AllIntegerVectors in { 524 def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1, 525 vti.RegClass:$rs2)), 526 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX) 527 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm, 528 vti.AVL, vti.Log2SEW)>; 529 530 def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1), 531 vti.RegClass:$rs2)), 532 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX) 533 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>; 534 535 def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1), 536 vti.RegClass:$rs2)), 537 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX) 538 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>; 539} 540 541// 12.1. Vector Single-Width Saturating Add and Subtract 542defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">; 543defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">; 544defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">; 545defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">; 546 547// 16.1. Vector Mask-Register Logical Instructions 548foreach mti = AllMasks in { 549 def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)), 550 (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX) 551 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 552 def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)), 553 (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX) 554 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 555 def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)), 556 (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX) 557 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 558 559 def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))), 560 (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX) 561 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 562 def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))), 563 (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX) 564 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 565 def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))), 566 (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX) 567 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 568 569 def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))), 570 (!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX) 571 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 572 def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))), 573 (!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX) 574 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; 575 576 // Handle rvv_vnot the same as the vmnot.m pseudoinstruction. 577 def : Pat<(mti.Mask (rvv_vnot VR:$rs)), 578 (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX) 579 VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>; 580} 581 582} // Predicates = [HasStdExtV] 583 584let Predicates = [HasStdExtV, HasStdExtF] in { 585 586// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions 587defm : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">; 588defm : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">; 589defm : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">; 590 591// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 592defm : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">; 593defm : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">; 594defm : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">; 595 596// 14.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions. 597foreach fvti = AllFloatVectors in { 598 // NOTE: We choose VFMADD because it has the most commuting freedom. So it 599 // works best with how TwoAddressInstructionPass tries commuting. 600 defvar suffix = fvti.LMul.MX # "_COMMUTABLE"; 601 def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd, 602 fvti.RegClass:$rs2)), 603 (!cast<Instruction>("PseudoVFMADD_VV_"# suffix) 604 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 605 fvti.AVL, fvti.Log2SEW)>; 606 def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd, 607 (fneg fvti.RegClass:$rs2))), 608 (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix) 609 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 610 fvti.AVL, fvti.Log2SEW)>; 611 def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd, 612 (fneg fvti.RegClass:$rs2))), 613 (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix) 614 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 615 fvti.AVL, fvti.Log2SEW)>; 616 def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd, 617 fvti.RegClass:$rs2)), 618 (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix) 619 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, 620 fvti.AVL, fvti.Log2SEW)>; 621 622 // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally 623 // commutable. 624 def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1), 625 fvti.RegClass:$rd, fvti.RegClass:$rs2)), 626 (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix) 627 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 628 fvti.AVL, fvti.Log2SEW)>; 629 def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1), 630 fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))), 631 (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix) 632 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 633 fvti.AVL, fvti.Log2SEW)>; 634 635 def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1), 636 (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))), 637 (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix) 638 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 639 fvti.AVL, fvti.Log2SEW)>; 640 def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1), 641 (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)), 642 (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix) 643 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 644 fvti.AVL, fvti.Log2SEW)>; 645 646 // The splat might be negated. 647 def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)), 648 fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))), 649 (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix) 650 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 651 fvti.AVL, fvti.Log2SEW)>; 652 def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)), 653 fvti.RegClass:$rd, fvti.RegClass:$rs2)), 654 (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix) 655 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, 656 fvti.AVL, fvti.Log2SEW)>; 657} 658 659foreach vti = AllFloatVectors in { 660 // 14.8. Vector Floating-Point Square-Root Instruction 661 def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)), 662 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX) 663 vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>; 664 665 // 14.12. Vector Floating-Point Sign-Injection Instructions 666 def : Pat<(fabs (vti.Vector vti.RegClass:$rs)), 667 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX) 668 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>; 669 // Handle fneg with VFSGNJN using the same input for both operands. 670 def : Pat<(fneg (vti.Vector vti.RegClass:$rs)), 671 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX) 672 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>; 673 674 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 675 (vti.Vector vti.RegClass:$rs2))), 676 (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX) 677 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>; 678 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 679 (vti.Vector (splat_vector vti.ScalarRegClass:$rs2)))), 680 (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 681 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>; 682 683 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 684 (vti.Vector (fneg vti.RegClass:$rs2)))), 685 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX) 686 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>; 687 def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), 688 (vti.Vector (fneg (splat_vector vti.ScalarRegClass:$rs2))))), 689 (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) 690 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>; 691} 692 693// 14.11. Vector Floating-Point MIN/MAX Instructions 694defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">; 695defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">; 696 697// 14.13. Vector Floating-Point Compare Instructions 698defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 699defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 700 701defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">; 702defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">; 703 704defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">; 705defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">; 706 707defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">; 708defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">; 709 710// Floating-point vselects: 711// 12.15. Vector Integer Merge Instructions 712// 14.15. Vector Floating-Point Merge Instruction 713foreach fvti = AllFloatVectors in { 714 def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1, 715 fvti.RegClass:$rs2)), 716 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 717 fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm, 718 fvti.AVL, fvti.Log2SEW)>; 719 720 def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), 721 (splat_vector fvti.ScalarRegClass:$rs1), 722 fvti.RegClass:$rs2)), 723 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 724 fvti.RegClass:$rs2, 725 (fvti.Scalar fvti.ScalarRegClass:$rs1), 726 VMV0:$vm, fvti.AVL, fvti.Log2SEW)>; 727 728 def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), 729 (splat_vector (fvti.Scalar fpimm0)), 730 fvti.RegClass:$rs2)), 731 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 732 fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.Log2SEW)>; 733} 734 735// 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions 736defm : VPatConvertFP2ISDNode_V<fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">; 737defm : VPatConvertFP2ISDNode_V<fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">; 738defm : VPatConvertI2FPSDNode_V<sint_to_fp, "PseudoVFCVT_F_X_V">; 739defm : VPatConvertI2FPSDNode_V<uint_to_fp, "PseudoVFCVT_F_XU_V">; 740 741// 14.18. Widening Floating-Point/Integer Type-Convert Instructions 742defm : VPatWConvertFP2ISDNode_V<fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">; 743defm : VPatWConvertFP2ISDNode_V<fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">; 744defm : VPatWConvertI2FPSDNode_V<sint_to_fp, "PseudoVFWCVT_F_X_V">; 745defm : VPatWConvertI2FPSDNode_V<uint_to_fp, "PseudoVFWCVT_F_XU_V">; 746foreach fvtiToFWti = AllWidenableFloatVectors in { 747 defvar fvti = fvtiToFWti.Vti; 748 defvar fwti = fvtiToFWti.Wti; 749 def : Pat<(fwti.Vector (fpextend (fvti.Vector fvti.RegClass:$rs1))), 750 (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX) 751 fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>; 752} 753 754// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions 755defm : VPatNConvertFP2ISDNode_V<fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">; 756defm : VPatNConvertFP2ISDNode_V<fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">; 757defm : VPatNConvertI2FPSDNode_V<sint_to_fp, "PseudoVFNCVT_F_X_W">; 758defm : VPatNConvertI2FPSDNode_V<uint_to_fp, "PseudoVFNCVT_F_XU_W">; 759foreach fvtiToFWti = AllWidenableFloatVectors in { 760 defvar fvti = fvtiToFWti.Vti; 761 defvar fwti = fvtiToFWti.Wti; 762 def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))), 763 (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX) 764 fwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>; 765} 766} // Predicates = [HasStdExtV, HasStdExtF] 767 768//===----------------------------------------------------------------------===// 769// Vector Splats 770//===----------------------------------------------------------------------===// 771 772let Predicates = [HasStdExtV] in { 773foreach vti = AllIntegerVectors in { 774 def : Pat<(vti.Vector (SplatPat GPR:$rs1)), 775 (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX) 776 GPR:$rs1, vti.AVL, vti.Log2SEW)>; 777 def : Pat<(vti.Vector (SplatPat_simm5 simm5:$rs1)), 778 (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX) 779 simm5:$rs1, vti.AVL, vti.Log2SEW)>; 780} 781} // Predicates = [HasStdExtV] 782 783let Predicates = [HasStdExtV, HasStdExtF] in { 784foreach fvti = AllFloatVectors in { 785 def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)), 786 (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 787 (fvti.Scalar fvti.ScalarRegClass:$rs1), 788 fvti.AVL, fvti.Log2SEW)>; 789 790 def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))), 791 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX) 792 0, fvti.AVL, fvti.Log2SEW)>; 793} 794} // Predicates = [HasStdExtV, HasStdExtF] 795 796//===----------------------------------------------------------------------===// 797// Vector Element Extracts 798//===----------------------------------------------------------------------===// 799let Predicates = [HasStdExtV, HasStdExtF] in 800foreach vti = AllFloatVectors in { 801 defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_", 802 vti.ScalarSuffix, 803 "_S_", vti.LMul.MX)); 804 // Only pattern-match extract-element operations where the index is 0. Any 805 // other index will have been custom-lowered to slide the vector correctly 806 // into place. 807 def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)), 808 (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>; 809} 810