//===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes FMA (Fused Multiply-Add) instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//

// For all FMA opcodes declared in the fma3p_rm_* and fma3s_rm_* multiclasses
// defined below, both the register and memory variants are commutable.
// For the register form the commutable operands are 1, 2 and 3.
// For the memory variant the folded operand must be operand 3. Thus,
// in that case, only operands 1 and 2 can be swapped.
// Commuting some of the operands may require an opcode change.
// FMA*213*:
//     operands 1 and 2 (memory & register forms): *213* --> *213* (no change);
//     operands 1 and 3 (register forms only):     *213* --> *231*;
//     operands 2 and 3 (register forms only):     *213* --> *132*.
// FMA*132*:
//     operands 1 and 2 (memory & register forms): *132* --> *231*;
//     operands 1 and 3 (register forms only):     *132* --> *132* (no change);
//     operands 2 and 3 (register forms only):     *132* --> *213*.
// FMA*231*:
//     operands 1 and 2 (memory & register forms): *231* --> *132*;
//     operands 1 and 3 (register forms only):     *231* --> *213*;
//     operands 2 and 3 (register forms only):     *231* --> *231* (no change).
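//
// As a quick reference for the table above (an informal summary, not a
// substitute for the ISA documentation), the arithmetic performed by the three
// forms is, with operand 1 tied to $dst:
//     FMA*132* $dst, $src2, $src3    // $dst = $dst  * $src3 + $src2
//     FMA*213* $dst, $src2, $src3    // $dst = $src2 * $dst  + $src3
//     FMA*231* $dst, $src2, $src3    // $dst = $src2 * $src3 + $dst
// For example, swapping operands 1 and 2 turns
//     FMA*132* reg1, reg2, reg3      // reg1 * reg3 + reg2
// into
//     FMA*231* reg2, reg1, reg3      // reg1 * reg3 + reg2
// which computes the same value and is why the table maps *132* to *231* for
// that swap.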

multiclass fma3p_rm_213<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDPatternOperator Op, X86FoldableSchedWrite sched> {
  def r     : FMA3<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
                   Sched<[sched]>;

  let mayLoad = 1 in
  def m     : FMA3<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst, (VT (Op RC:$src2, RC:$src1,
                                          (MemFrag addr:$src3))))]>,
                   Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3p_rm_231<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDPatternOperator Op, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r     : FMA3<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   []>, Sched<[sched]>;

  let mayLoad = 1 in
  def m     : FMA3<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst, (VT (Op RC:$src2, (MemFrag addr:$src3),
                                          RC:$src1)))]>,
                   Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3p_rm_132<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDPatternOperator Op, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r     : FMA3<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   []>, Sched<[sched]>;

  // Pattern is in 312 order so that the load is in a different place from the
  // 213 and 231 patterns; this helps tablegen's duplicate pattern detection.
  let mayLoad = 1 in
  def m     : FMA3<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst, (VT (Op (MemFrag addr:$src3), RC:$src1,
                                          RC:$src2)))]>,
                   Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

let Constraints = "$src1 = $dst", hasSideEffects = 0, isCommutable = 1,
    Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr, string PackTy, string Suff,
                       PatFrag MemFrag128, PatFrag MemFrag256,
                       SDPatternOperator Op, ValueType OpTy128, ValueType OpTy256,
                       X86SchedWriteWidths sched> {
  defm NAME#213#Suff : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;
  defm NAME#231#Suff : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;
  defm NAME#132#Suff : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;

  defm NAME#213#Suff#Y : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                                      VEX_L;
  defm NAME#231#Suff#Y : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                                      VEX_L;
  defm NAME#132#Suff#Y : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                                      VEX_L;
}

// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", "PS",
                               loadv4f32, loadv8f32, any_fma, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86any_Fmsub, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmaddsub, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmsubadd, v4f32, v8f32,
                               SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", "PD",
                               loadv2f64, loadv4f64, any_fma, v2f64,
                               v4f64, SchedWriteFMA>, VEX_W;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86any_Fmsub, v2f64,
                               v4f64, SchedWriteFMA>, VEX_W;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmaddsub,
                               v2f64, v4f64, SchedWriteFMA>, VEX_W;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmsubadd,
                               v2f64, v4f64, SchedWriteFMA>, VEX_W;
}

// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", "PS", loadv4f32,
                             loadv8f32, X86any_Fnmadd, v4f32, v8f32, SchedWriteFMA>;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", "PS", loadv4f32,
                             loadv8f32, X86any_Fnmsub, v4f32, v8f32, SchedWriteFMA>;
}
let ExeDomain = SSEPackedDouble in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", "PD", loadv2f64,
                             loadv4f64, X86any_Fnmadd, v2f64, v4f64, SchedWriteFMA>, VEX_W;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub",
"pd", "PD", loadv2f64, 165 loadv4f64, X86any_Fnmsub, v2f64, v4f64, SchedWriteFMA>, VEX_W; 166} 167 168// All source register operands of FMA opcodes defined in fma3s_rm multiclass 169// can be commuted. In many cases such commute transformation requires an opcode 170// adjustment, for example, commuting the operands 1 and 2 in FMA*132 form 171// would require an opcode change to FMA*231: 172// FMA*132* reg1, reg2, reg3; // reg1 * reg3 + reg2; 173// --> 174// FMA*231* reg2, reg1, reg3; // reg1 * reg3 + reg2; 175// Please see more detailed comment at the very beginning of the section 176// defining FMA3 opcodes above. 177multiclass fma3s_rm_213<bits<8> opc, string OpcodeStr, 178 X86MemOperand x86memop, RegisterClass RC, 179 SDPatternOperator OpNode, 180 X86FoldableSchedWrite sched> { 181 def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst), 182 (ins RC:$src1, RC:$src2, RC:$src3), 183 !strconcat(OpcodeStr, 184 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), 185 [(set RC:$dst, (OpNode RC:$src2, RC:$src1, RC:$src3))]>, 186 Sched<[sched]>; 187 188 let mayLoad = 1 in 189 def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst), 190 (ins RC:$src1, RC:$src2, x86memop:$src3), 191 !strconcat(OpcodeStr, 192 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), 193 [(set RC:$dst, 194 (OpNode RC:$src2, RC:$src1, (load addr:$src3)))]>, 195 Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>; 196} 197 198multiclass fma3s_rm_231<bits<8> opc, string OpcodeStr, 199 X86MemOperand x86memop, RegisterClass RC, 200 SDPatternOperator OpNode, X86FoldableSchedWrite sched> { 201 let hasSideEffects = 0 in 202 def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst), 203 (ins RC:$src1, RC:$src2, RC:$src3), 204 !strconcat(OpcodeStr, 205 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), 206 []>, Sched<[sched]>; 207 208 let mayLoad = 1 in 209 def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst), 210 (ins RC:$src1, RC:$src2, x86memop:$src3), 211 !strconcat(OpcodeStr, 212 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), 213 [(set RC:$dst, 214 (OpNode RC:$src2, (load addr:$src3), RC:$src1))]>, 215 Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>; 216} 217 218multiclass fma3s_rm_132<bits<8> opc, string OpcodeStr, 219 X86MemOperand x86memop, RegisterClass RC, 220 SDPatternOperator OpNode, X86FoldableSchedWrite sched> { 221 let hasSideEffects = 0 in 222 def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst), 223 (ins RC:$src1, RC:$src2, RC:$src3), 224 !strconcat(OpcodeStr, 225 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), 226 []>, Sched<[sched]>; 227 228 // Pattern is 312 order so that the load is in a different place from the 229 // 213 and 231 patterns this helps tablegen's duplicate pattern detection. 
  let mayLoad = 1 in
  def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst,
                  (OpNode (load addr:$src3), RC:$src1, RC:$src2))]>,
                Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

let Constraints = "$src1 = $dst", isCommutable = 1, isCodeGenOnly = 1,
    hasSideEffects = 0, Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpStr, string PackTy, string Suff,
                       SDPatternOperator OpNode, RegisterClass RC,
                       X86MemOperand x86memop, X86FoldableSchedWrite sched> {
  defm NAME#213#Suff : fma3s_rm_213<opc213, !strconcat(OpStr, "213", PackTy),
                                    x86memop, RC, OpNode, sched>;
  defm NAME#231#Suff : fma3s_rm_231<opc231, !strconcat(OpStr, "231", PackTy),
                                    x86memop, RC, OpNode, sched>;
  defm NAME#132#Suff : fma3s_rm_132<opc132, !strconcat(OpStr, "132", PackTy),
                                    x86memop, RC, OpNode, sched>;
}

// These FMA*_Int instructions are defined specially for use when the scalar
// FMA intrinsics are lowered to machine instructions; in that sense they are
// similar to the existing ADD*_Int, SUB*_Int, MUL*_Int, etc. instructions.
//
// All of the FMA*_Int opcodes are defined as commutable here.
// Commuting the 2nd and 3rd source register operands of FMAs is quite trivial
// and the corresponding optimizations have been developed.
// Commuting the 1st operand of FMA*_Int requires some additional analysis:
// the commute optimization is legal only if all users of FMA*_Int use only
// the lowest element of the FMA*_Int instruction. Even though such analysis
// may not be implemented yet, we allow the routines doing the actual commute
// transformation to decide if one or another instruction is commutable or not.
let Constraints = "$src1 = $dst", isCommutable = 1, hasSideEffects = 0,
    Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr,
                        Operand memopr, RegisterClass RC,
                        X86FoldableSchedWrite sched> {
  def r_Int : FMA3S_Int<opc, MRMSrcReg, (outs RC:$dst),
                        (ins RC:$src1, RC:$src2, RC:$src3),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        []>, Sched<[sched]>;

  let mayLoad = 1 in
  def m_Int : FMA3S_Int<opc, MRMSrcMem, (outs RC:$dst),
                        (ins RC:$src1, RC:$src2, memopr:$src3),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        []>, Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

// The FMA 213 form is created for lowering scalar FMA intrinsics to machine
// instructions.
// The FMA 132 form can be obtained trivially by commuting the 2nd and 3rd
// operands of the FMA 213 form.
// The FMA 231 form can be obtained only by commuting the 1st operand of the
// 213 or 132 forms, which is possible only after a special analysis of all
// uses of the initial instruction. Such analysis does not exist yet, so the
// 231 form of the FMA*_Int instructions is introduced under the optimistic
// assumption that such analysis will be implemented eventually.
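//
// As an illustrative sketch (using the SS form and the operand names from the
// definitions below, and not describing any extra machinery beyond the comments
// above): lowering picks the 213 form,
//     VFMADD213SS $dst, $src2, $src3   // $dst = $src2 * $dst + $src3
// and commuting $src2 with $src3 yields the equivalent 132 form
//     VFMADD132SS $dst, $src3, $src2   // $dst = $dst * $src2 + $src3
// because the multiplication operands commute. Reaching the 231 form requires
// commuting the tied operand 1, which changes which register supplies the
// upper (pass-through) elements, hence the use analysis mentioned above.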
multiclass fma3s_int_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                           string OpStr, string PackTy, string Suff,
                           RegisterClass RC, Operand memop,
                           X86FoldableSchedWrite sched> {
  defm NAME#132#Suff : fma3s_rm_int<opc132, !strconcat(OpStr, "132", PackTy),
                                    memop, RC, sched>;
  defm NAME#213#Suff : fma3s_rm_int<opc213, !strconcat(OpStr, "213", PackTy),
                                    memop, RC, sched>;
  defm NAME#231#Suff : fma3s_rm_int<opc231, !strconcat(OpStr, "231", PackTy),
                                    memop, RC, sched>;
}

multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                 string OpStr, SDPatternOperator OpNode, X86FoldableSchedWrite sched> {
  let ExeDomain = SSEPackedSingle in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", "SS", OpNode,
                          FR32, f32mem, sched>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "ss", "SS",
                              VR128, ssmem, sched>;

  let ExeDomain = SSEPackedDouble in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", "SD", OpNode,
                          FR64, f64mem, sched>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "sd", "SD",
                              VR128, sdmem, sched>, VEX_W;
}

defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", any_fma,
                    SchedWriteFMA.Scl>, VEX_LIG;
defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", X86any_Fmsub,
                    SchedWriteFMA.Scl>, VEX_LIG;

defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", X86any_Fnmadd,
                     SchedWriteFMA.Scl>, VEX_LIG;
defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", X86any_Fnmsub,
                     SchedWriteFMA.Scl>, VEX_LIG;

multiclass scalar_fma_patterns<SDPatternOperator Op, string Prefix, string Suffix,
                               SDNode Move, ValueType VT, ValueType EltVT,
                               RegisterClass RC, PatFrag mem_frag> {
  let Predicates = [HasFMA, NoAVX512] in {
    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2,
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                    RC:$src3))))),
              (!cast<Instruction>(Prefix#"213"#Suffix#"r_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2, RC:$src3,
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0)))))))),
              (!cast<Instruction>(Prefix#"231"#Suffix#"r_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2,
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                    (mem_frag addr:$src3)))))),
              (!cast<Instruction>(Prefix#"213"#Suffix#"m_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               addr:$src3)>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                    (mem_frag addr:$src3), RC:$src2))))),
              (!cast<Instruction>(Prefix#"132"#Suffix#"m_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               addr:$src3)>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                (Op RC:$src2, (mem_frag addr:$src3),
                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0)))))))),
              (!cast<Instruction>(Prefix#"231"#Suffix#"m_Int")
               VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               addr:$src3)>;
  }
}

defm : scalar_fma_patterns<any_fma, "VFMADD", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86any_Fmsub, "VFMSUB", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86any_Fnmadd, "VFNMADD", "SS",
                           X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86any_Fnmsub, "VFNMSUB", "SS", X86Movss, v4f32, f32, FR32, loadf32>;

defm : scalar_fma_patterns<any_fma, "VFMADD", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86any_Fmsub, "VFMSUB", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86any_Fnmadd, "VFNMADD", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86any_Fnmsub, "VFNMSUB", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;

//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//

let Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                 X86MemOperand x86memop, ValueType OpVT, SDPatternOperator OpNode,
                 PatFrag mem_frag, X86FoldableSchedWrite sched> {
  let isCommutable = 1 in
  def rr : FMA4S<opc, MRMSrcRegOp4, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, RC:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set RC:$dst,
                   (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, VEX_LIG,
                 Sched<[sched]>;
  def rm : FMA4S<opc, MRMSrcMemOp4, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, x86memop:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
                                 (mem_frag addr:$src3)))]>, VEX_W, VEX_LIG,
                 Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
  def mr : FMA4S<opc, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, x86memop:$src2, RC:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set RC:$dst,
                   (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG,
                 Sched<[sched.Folded, sched.ReadAfterFold,
                        // x86memop:$src2
                        ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                        ReadDefault,
                        // RC:$src3
                        sched.ReadAfterFold]>;
// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : FMA4S<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, RC:$src3),
                     !strconcat(OpcodeStr,
                     "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                     VEX_LIG, FoldGenData<NAME#rr>, Sched<[sched]>;
}

multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
                     X86FoldableSchedWrite sched> {
let isCodeGenOnly = 1, hasSideEffects = 0,
    Uses = [MXCSR], mayRaiseFPException = 1 in {
  def rr_Int : FMA4S_Int<opc, MRMSrcRegOp4, (outs VR128:$dst),
                         (ins VR128:$src1, VR128:$src2, VR128:$src3),
                         !strconcat(OpcodeStr,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                         []>, VEX_W, VEX_LIG, Sched<[sched]>;
  let mayLoad = 1 in
  def rm_Int : FMA4S_Int<opc, MRMSrcMemOp4, (outs VR128:$dst),
                         (ins VR128:$src1, VR128:$src2, memop:$src3),
                         !strconcat(OpcodeStr,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                         []>, VEX_W, VEX_LIG,
                         Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
  let mayLoad = 1 in
  def mr_Int : FMA4S_Int<opc, MRMSrcMem, (outs VR128:$dst),
                         (ins VR128:$src1, memop:$src2, VR128:$src3),
                         !strconcat(OpcodeStr,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                         []>,
                         VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold,
                                         // memop:$src2
                                         ReadDefault, ReadDefault, ReadDefault,
                                         ReadDefault, ReadDefault,
                                         // VR128::$src3
                                         sched.ReadAfterFold]>;
  def rr_Int_REV : FMA4S_Int<opc, MRMSrcReg, (outs VR128:$dst),
                             (ins VR128:$src1, VR128:$src2, VR128:$src3),
                             !strconcat(OpcodeStr,
                             "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                             []>, VEX_LIG, FoldGenData<NAME#rr_Int>, Sched<[sched]>;
} // isCodeGenOnly = 1
}

let Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma4p<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
                 ValueType OpVT128, ValueType OpVT256,
                 PatFrag ld_frag128, PatFrag ld_frag256,
                 X86SchedWriteWidths sched> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcRegOp4, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src2, VR128:$src3),
                !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst,
                  (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>,
                VEX_W, Sched<[sched.XMM]>;
  def rm : FMA4<opc, MRMSrcMemOp4, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src2, f128mem:$src3),
                !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst, (OpNode VR128:$src1, VR128:$src2,
                                   (ld_frag128 addr:$src3)))]>, VEX_W,
                Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold, sched.XMM.ReadAfterFold]>;
  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, f128mem:$src2, VR128:$src3),
                !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst,
                  (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>,
                Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold,
                       // f128mem:$src2
                       ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                       ReadDefault,
                       // VR128::$src3
                       sched.XMM.ReadAfterFold]>;
  let isCommutable = 1 in
  def Yrr : FMA4<opc, MRMSrcRegOp4, (outs VR256:$dst),
                 (ins VR256:$src1, VR256:$src2, VR256:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set VR256:$dst,
                   (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>,
                 VEX_W, VEX_L, Sched<[sched.YMM]>;
  def Yrm : FMA4<opc, MRMSrcMemOp4, (outs VR256:$dst),
                 (ins VR256:$src1, VR256:$src2, f256mem:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2,
                                    (ld_frag256 addr:$src3)))]>, VEX_W, VEX_L,
                 Sched<[sched.YMM.Folded, sched.YMM.ReadAfterFold, sched.YMM.ReadAfterFold]>;
  def Ymr : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
                 (ins VR256:$src1, f256mem:$src2, VR256:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set VR256:$dst, (OpNode VR256:$src1,
                                    (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L,
                 Sched<[sched.YMM.Folded, sched.YMM.ReadAfterFold,
                        // f256mem:$src2
                        ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                        ReadDefault,
                        // VR256::$src3
                        sched.YMM.ReadAfterFold]>;
// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
  def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, VR128:$src3),
                    !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                    Sched<[sched.XMM]>, FoldGenData<NAME#rr>;
  def Yrr_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, VR256:$src2, VR256:$src3),
                     !strconcat(OpcodeStr,
                     "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                     VEX_L, Sched<[sched.YMM]>, FoldGenData<NAME#Yrr>;
} // isCodeGenOnly = 1
}

let ExeDomain = SSEPackedSingle in {
  // Scalar Instructions
  defm VFMADDSS4  : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, any_fma, loadf32,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6A, "vfmaddss", ssmem, SchedWriteFMA.Scl>;
  defm VFMSUBSS4  : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86any_Fmsub, loadf32,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6E, "vfmsubss", ssmem, SchedWriteFMA.Scl>;
  defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32,
                          X86any_Fnmadd, loadf32, SchedWriteFMA.Scl>,
                    fma4s_int<0x7A, "vfnmaddss", ssmem, SchedWriteFMA.Scl>;
  defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32,
                          X86any_Fnmsub, loadf32, SchedWriteFMA.Scl>,
                    fma4s_int<0x7E, "vfnmsubss", ssmem, SchedWriteFMA.Scl>;
  // Packed Instructions
  defm VFMADDPS4    : fma4p<0x68, "vfmaddps", any_fma, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMSUBPS4    : fma4p<0x6C, "vfmsubps", X86any_Fmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFNMADDPS4   : fma4p<0x78, "vfnmaddps", X86any_Fnmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFNMSUBPS4   : fma4p<0x7C, "vfnmsubps", X86any_Fnmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  // Scalar Instructions
  defm VFMADDSD4  : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, any_fma, loadf64,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6B, "vfmaddsd", sdmem, SchedWriteFMA.Scl>;
  defm VFMSUBSD4  : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86any_Fmsub, loadf64,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6F, "vfmsubsd", sdmem, SchedWriteFMA.Scl>;
  defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64,
                          X86any_Fnmadd, loadf64, SchedWriteFMA.Scl>,
                    fma4s_int<0x7B, "vfnmaddsd", sdmem, SchedWriteFMA.Scl>;
  defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
                          X86any_Fnmsub, loadf64, SchedWriteFMA.Scl>,
                    fma4s_int<0x7F, "vfnmsubsd", sdmem, SchedWriteFMA.Scl>;
  // Packed Instructions
  defm VFMADDPD4    : fma4p<0x69, "vfmaddpd", any_fma, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMSUBPD4    : fma4p<0x6D, "vfmsubpd", X86any_Fmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFNMADDPD4   : fma4p<0x79, "vfnmaddpd", X86any_Fnmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFNMSUBPD4   : fma4p<0x7D, "vfnmsubpd", X86any_Fnmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
}

multiclass scalar_fma4_patterns<SDPatternOperator Op, string Name,
                                ValueType VT, RegisterClass RC,
                                PatFrag mem_frag> {
  let Predicates = [HasFMA4] in {
    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                  (Op RC:$src1, RC:$src2, RC:$src3))))),
              (!cast<Instruction>(Name#"rr_Int")
               (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
               (VT
                (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                  (Op RC:$src1, RC:$src2,
                      (mem_frag addr:$src3)))))),
              (!cast<Instruction>(Name#"rm_Int")
               (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
               (VT (COPY_TO_REGCLASS RC:$src2, VR128)), addr:$src3)>;

    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                  (Op RC:$src1, (mem_frag addr:$src2),
                      RC:$src3))))),
              (!cast<Instruction>(Name#"mr_Int")
               (VT (COPY_TO_REGCLASS RC:$src1, VR128)), addr:$src2,
               (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;
  }
}

defm : scalar_fma4_patterns<any_fma, "VFMADDSS4", v4f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86any_Fmsub, "VFMSUBSS4", v4f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86any_Fnmadd, "VFNMADDSS4", v4f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86any_Fnmsub, "VFNMSUBSS4", v4f32, FR32, loadf32>;

defm : scalar_fma4_patterns<any_fma, "VFMADDSD4", v2f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86any_Fmsub, "VFMSUBSD4", v2f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86any_Fnmadd, "VFNMADDSD4", v2f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86any_Fnmsub, "VFNMSUBSD4", v2f64, FR64, loadf64>;