//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//

def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;

multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)>
{
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1, avl, log2sew)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}

multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg reg_class,
                                        int sew = !shl(1, log2sew)>
{
  defvar load_instr =
    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
  defvar store_instr =
    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");

  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1)>;
}

multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
{
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load GPR:$rs1)),
            (load_instr GPR:$rs1, m.AVL, m.Log2SEW)>;
  // Store
  def : Pat<(store m.Mask:$rs2, GPR:$rs1),
            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
}

class VPatBinarySDNode_VV<SDNode vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg op_reg_class> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
                     op_reg_class:$rs1,
                     op_reg_class:$rs2,
                     avl, sew)>;

class VPatBinarySDNode_XI<SDNode vop,
                          string instruction_name,
                          string suffix,
                          ValueType result_type,
                          ValueType vop_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          ComplexPattern SplatPatKind,
                          DAGOperand xop_kind> :
    Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind xop_kind:$rs2)))),
        (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
                     vop_reg_class:$rs1,
                     xop_kind:$rs2,
                     avl, sew)>;

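// Instantiate the vector-vector and vector-scalar binary patterns (plus the
// vector-immediate form for the _VI variant) for every integer vector type.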
multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    def : VPatBinarySDNode_VV<vop, instruction_name,
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass>;
    def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              SplatPat, GPR>;
  }
}

multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
                                     Operand ImmType = simm5>
    : VPatBinarySDNode_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              !cast<ComplexPattern>(SplatPat#_#ImmType),
                              ImmType>;
  }
}

class VPatBinarySDNode_VF<SDNode vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType xop_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          DAGOperand xop_kind> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
        (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
                     vop_reg_class:$rs1,
                     (xop_type xop_kind:$rs2),
                     avl, sew)>;

multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    def : VPatBinarySDNode_VV<vop, instruction_name,
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass>;
    def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                              vti.Vector, vti.Vector, vti.Scalar,
                              vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                              vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                           fvti.RegClass:$rs1,
                           (fvti.Scalar fvti.ScalarRegClass:$rs2),
                           fvti.AVL, fvti.Log2SEW)>;
}

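// Integer comparison patterns. The VV forms compare two vectors; the XI forms
// compare a vector with a splatted scalar or immediate. The "Swappable"
// variants also match the operands in the opposite order using the inverse
// condition code, so both operand orders select the same compare pseudo.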
multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
                                     CondCode cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector vti.RegClass:$rs2), cc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VV_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_VV<instruction_name, cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_XI<
          string instruction_name,
          CondCode cc,
          string kind,
          ComplexPattern SplatPatKind,
          DAGOperand xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc,
                                               string kind,
                                               ComplexPattern SplatPatKind,
                                               DAGOperand xop_kind>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, kind, SplatPatKind,
                                xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
    def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind xop_kind:$rs2)),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VX_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VX",
                                          SplatPat, GPR>;

multiclass VPatIntegerSetCCSDNode_VI<string instruction_name, CondCode cc>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, "VI", SplatPat_simm5, simm5>;

multiclass VPatIntegerSetCCSDNode_VIPlus1<string instruction_name, CondCode cc,
                                          ComplexPattern splatpat_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (splatpat_kind simm5:$rs2)),
                               cc)),
              (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                           vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
                                      string inst_name,
                                      string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Vector fvti.RegClass:$rs2),
                                cc)),
              (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                (SplatFPOp fvti.ScalarRegClass:$rs2),
                                cc)),
              (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                  fvti.AVL, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2),
                                (fvti.Vector fvti.RegClass:$rs1),
                                cc)),
              (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                  fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
                              list<VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    foreach op = ops in
      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                    fti.RegClass:$rs2, fti.AVL, vti.Log2SEW)>;
  }
}

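// Integer<->floating-point conversion helpers: VPatConvert* selects
// single-width conversions, VPatWConvert* widening conversions, and
// VPatNConvert* narrowing conversions.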
multiclass VPatConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  ivti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
  }
}

multiclass VPatWConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  ivti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
  }
}

multiclass VPatWConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatNConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  iwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                  fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                  (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                  vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                  (wti.Vector (extop2 (vti.Vector (SplatPat GPR:$rs1))))),
              (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                  vti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                  wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                  TAIL_AGNOSTIC)>;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))),
              (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                  wti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop,
                                             string instruction_name> {
  defm : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>;
  defm : VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>;
}

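// Widening integer multiply-add: fold an add of a wide accumulator with a
// single-use multiply of extended narrow operands (vector-vector or
// splat-vector) into a vwmacc-style pseudo.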
multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
         wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
         vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2,
                                    string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat GPR:$rs1)))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
         wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
         vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                   (vti.Vector vti.RegClass:$rs2),
                                   (vti.Mask true_mask), (XLenVT srcvalue))),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                   (vti.Vector vti.RegClass:$rs1),
                                   (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                   (vti.Vector vti.RegClass:$rs2),
                                   (vti.Mask true_mask), (XLenVT srcvalue))),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                   (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                   (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                   (vti.Vector vti.RegClass:$rs2),
                                   (vti.Mask true_mask), (XLenVT srcvalue))),
                  (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinaryFPSDNode_WV_WF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                   (vti.Vector vti.RegClass:$rs1),
                                   (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                 wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                 TAIL_AGNOSTIC)>;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                   (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                   (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF<SDNode op, string instruction_name> {
  defm : VPatWidenBinaryFPSDNode_VV_VF<op, instruction_name>;
  defm : VPatWidenBinaryFPSDNode_WV_WF<op, instruction_name>;
}

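// Widening floating-point fused multiply-add: match fma (with optional
// negations) whose multiplicands are single-use fpextends of the narrower
// type, selecting vfwmacc/vfwnmacc/vfwmsac/vfwnmsac pseudos.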
multiclass VPatWidenFPMulAccSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                    (vti.Vector vti.RegClass:$rs1),
                                    (vti.Mask true_mask), (XLenVT srcvalue))),
                   (wti.Vector (riscv_fpextend_vl_oneuse
                                    (vti.Vector vti.RegClass:$rs2),
                                    (vti.Mask true_mask), (XLenVT srcvalue))),
                   (wti.Vector wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (wti.Vector (SplatFPOp
                                    (fpext_oneuse vti.ScalarRegClass:$rs1))),
                   (wti.Vector (riscv_fpextend_vl_oneuse
                                    (vti.Vector vti.RegClass:$rs2),
                                    (vti.Mask true_mask), (XLenVT srcvalue))),
                   (wti.Vector wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

multiclass VPatWidenFPNegMulAccSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                          (vti.Vector vti.RegClass:$rs1),
                                          (vti.Mask true_mask), (XLenVT srcvalue)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)),
                   (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                          (vti.Vector vti.RegClass:$rs2),
                                          (vti.Mask true_mask), (XLenVT srcvalue)))),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

multiclass VPatWidenFPMulSacSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                    (vti.Vector vti.RegClass:$rs1),
                                    (vti.Mask true_mask), (XLenVT srcvalue))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

multiclass VPatWidenFPNegMulSacSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                          (vti.Vector vti.RegClass:$rs1),
                                          (vti.Mask true_mask), (XLenVT srcvalue)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   wti.RegClass:$rd),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                   (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                          (vti.Vector vti.RegClass:$rs2),
                                          (vti.Mask true_mask), (XLenVT srcvalue)))),
                   wti.RegClass:$rd),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   wti.RegClass:$rd),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

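// Single-width integer multiply-add: fold an add or sub whose second operand
// is a single-use multiply into a multiply-add pseudo.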
multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    // NOTE: We choose VMADD because it has the most commuting freedom. So it
    // works best with how TwoAddressInstructionPass tries commuting.
    def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                              (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                   vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    // The choice of VMADD here is arbitrary; vmadd.vx and vmacc.vx are equally
    // commutable.
    def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                              (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))),
              (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                   vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {

// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
                          FractionalGroupFloatVectors) in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach mti = AllMasks in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)),
                 (vti.Vector vti.RegClass:$rs1)),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                 (vti.Vector vti.RegClass:$rs1)),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>;
}

// 11.2. Vector Widening Integer Add and Subtract
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;

defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;

// 11.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
                          AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
                          AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
                 (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
}

// 11.8. Vector Integer Comparison Instructions
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>;

defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;

defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;
// There is no VMSGE(U)_VX instruction

defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSNE", SETNE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLE", SETLE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLEU", SETULE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGT", SETGT>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGTU", SETUGT>;

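// There is no vmslt{u}.vi or vmsge{u}.vi. Select "x < C" as vmsle{u}.vi with
// C-1 and "x >= C" as vmsgt{u}.vi with C-1 when the decremented immediate is
// representable.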
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLE", SETLT,
                                      SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLEU", SETULT,
                                      SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGT", SETGE,
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGTU", SETUGE,
                                      SplatPat_simm5_plus1_nonzero>;

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">;
defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH">;
defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU">;

// 11.11. Vector Integer Divide Instructions
defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU">;
defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV">;
defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU">;
defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM">;

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                   "PseudoVWMUL">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse,
                                   "PseudoVWMULSU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse,
                                   "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions
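// add of a product selects vmadd.{vv,vx}; sub with the product as the
// subtrahend selects vnmsub.{vv,vx}.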
defm : VPatMultiplyAddSDNode_VV_VX<add, "PseudoVMADD">;
defm : VPatMultiplyAddSDNode_VV_VX<sub, "PseudoVNMSUB">;

// 11.14 Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                 vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                 vti.AVL, vti.Log2SEW)>;

  def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                 vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;

  def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                 vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">;
defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">;
defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;

// 15. Vector Mask Instructions

// 15.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

  def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

  def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
            (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
            (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

  // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
  def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
}

} // Predicates = [HasVInstructions]

// 13. Vector Floating-Point Instructions

let Predicates = [HasVInstructionsAnyF] in {

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">;
defm : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">;
defm : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fadd, "PseudoVFWADD">;
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fsub, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
defm : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
defm : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF<fmul, "PseudoVFWMUL">;

// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX;
  def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                              fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                              (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                              (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                              fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

  // The choice of VFMADD here is arbitrary; vfmadd.vf and vfmacc.vf are equally
  // commutable.
  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              fvti.RegClass:$rd, fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;

  // The splat might be negated.
  def : Pat<(fvti.Vector (fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                              fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                              fvti.RegClass:$rd, fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccSDNode_VV_VF<"PseudoVFWMACC">;
defm : VPatWidenFPNegMulAccSDNode_VV_VF<"PseudoVFWNMACC">;
defm : VPatWidenFPMulSacSDNode_VV_VF<"PseudoVFWMSAC">;
defm : VPatWidenFPNegMulSacSDNode_VV_VF<"PseudoVFWNMSAC">;

foreach vti = AllFloatVectors in {
  // 13.8. Vector Floating-Point Square-Root Instruction
  def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
                 vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;

  // 13.12. Vector Floating-Point Sign-Injection Instructions
  def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
  // Handle fneg with VFSGNJN using the same input for both operands.
  def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;

  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector vti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;

  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (fneg vti.RegClass:$rs2)))),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
            (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
}

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">;
defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;

defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
foreach fvti = AllFloatVectors in {
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                 fvti.AVL, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp fvti.ScalarRegClass:$rs1),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2,
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
                 (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp (fvti.Scalar fpimm0)),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                 fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V<fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
defm : VPatConvertFP2ISDNode_V<fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertI2FPSDNode_V<sint_to_fp, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FPSDNode_V<uint_to_fp, "PseudoVFCVT_F_XU_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2ISDNode_V<fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
defm : VPatWConvertFP2ISDNode_V<fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertI2FPSDNode_V<sint_to_fp, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FPSDNode_V<uint_to_fp, "PseudoVFWCVT_F_XU_V">;

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2ISDNode_V<fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2ISDNode_V<fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPSDNode_V<sint_to_fp, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPSDNode_V<uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                 fwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// Vector Splats
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructionsAnyF] in {
foreach fvti = AllFloatVectors in {
  def : Pat<(fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)),
            (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
                 fvti.AVL, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                 0, fvti.AVL, fvti.Log2SEW)>;
}
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructionsAnyF] in
foreach vti = AllFloatVectors in {
  defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                      vti.ScalarSuffix,
                                                      "_S_", vti.LMul.MX));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}