1//===-- VOP3PInstructions.td - Vector Instruction Definitions -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9//===----------------------------------------------------------------------===//
10// VOP3P Classes
11//===----------------------------------------------------------------------===//
12
13class VOP3P_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
14                    bit HasDPP = 0> : VOP3_Profile<P, Features> {
15  let IsVOP3P = 1;
16  let HasExtVOP3DPP = HasDPP;
  // We do not want to print src modifiers for VOP3P because the bits are
  // overloaded in meaning and the logic in printOperandAndFPInputMods is
  // wrong for VOP3P.
20  let AsmVOP3Base = AsmVOP3P;
21}
22
// Used for FMA_MIX* and MAD_MIX* instructions.
// Their operands are only nominally f16 operands. Depending on
// op_sel_hi, they may instead be interpreted as f32. The inline immediate
// values are really f16 values converted to f32, so we treat them as f16
// operands. See the example following this class.
28class VOP3P_Mix_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
29                    bit useTiedOutput = 0> : VOP3P_Profile<P, Features, 1> {
30    bit UseTiedOutput = useTiedOutput;
31
32    dag srcs =
33          (ins FP16InputMods:$src0_modifiers, VCSrc_f16:$src0,
34               FP16InputMods:$src1_modifiers, VCSrc_f16:$src1,
35               FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);
36    dag dpp_srcs =
37          (ins FPVRegInputMods:$src0_modifiers, VGPRSrc_32:$src0,
38               FPVRegInputMods:$src1_modifiers, VRegSrc_32:$src1,
39               FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);
40
    // FIXME: clampmod0 misbehaves with the non-default vdst_in
    // following it. For now, work around this by requiring clamp
    // in tied patterns. This should use undef_tied_input, but it
    // seems underdeveloped and doesn't apply the right register
    // class constraints.
46    dag mods = !con(!if(UseTiedOutput, (ins clampmod:$clamp, VGPR_32:$vdst_in),
47                        (ins clampmod0:$clamp)),
48                    (ins op_sel0:$op_sel, op_sel_hi0:$op_sel_hi));
    // We use Ins64 because it is the field that populates InOperandList,
    // due to the logic in class VOP3_Pseudo.
51    let Ins64 = !con(srcs, mods);
52    let InsVOP3Base = !con(dpp_srcs, mods);
53    let AsmVOP3Base =
54      "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
55}
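// Illustrative example (not from this file): with the usual assembler syntax,
//   v_fma_mix_f32 v0, v1, v2, v3 op_sel_hi:[1,1,0]
// reads v1 and v2 as f16 (their low halves, since op_sel is all zeros) and v3
// as f32: op_sel_hi marks a source as f16, and op_sel then picks its lo or hi
// half.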
56
57multiclass VOP3PInst<string OpName, VOPProfile P,
58                     SDPatternOperator node = null_frag, bit IsDOT = 0> {
59  def NAME : VOP3P_Pseudo<OpName, P,
60                          !if (P.HasModifiers,
61                               getVOP3PModPat<P, node, IsDOT, IsDOT>.ret,
62                               getVOP3Pat<P, node>.ret)>;
63  let SubtargetPredicate = isGFX11Plus in {
64  if P.HasExtVOP3DPP then
65    def _dpp : VOP3_DPP_Pseudo<OpName, P> {
66      let VOP3P = 1;
67      let PseudoInstr = OpName #"_dpp";
68    }
69  } // end SubtargetPredicate = isGFX11Plus
70}
71
72// Non-packed instructions that use the VOP3P encoding.
73// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
74multiclass VOP3_VOP3PInst<string OpName, VOP3P_Mix_Profile P> {
75  def NAME : VOP3P_Pseudo<OpName, P> {
76    let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
77    let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
78  }
79  let SubtargetPredicate = isGFX11Plus in {
80    if P.HasExtVOP3DPP then
81      def _dpp : VOP3_DPP_Pseudo<OpName, P> {
82        let VOP3P = 1;
83        let PseudoInstr = OpName#"_dpp";
84        let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
85        let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
86      }
87  } // end SubtargetPredicate = isGFX11Plus
88}
89
90let isReMaterializable = 1 in {
91let isCommutable = 1 in {
92defm V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
93defm V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
94
95let FPDPRounding = 1 in {
96defm V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, any_fma>;
97defm V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fadd>;
98defm V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fmul>;
99} // End FPDPRounding = 1
100defm V_PK_MAX_F16 : VOP3PInst<"v_pk_max_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fmaxnum_like>;
101defm V_PK_MIN_F16 : VOP3PInst<"v_pk_min_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fminnum_like>;
102
103defm V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, add>;
104defm V_PK_ADD_I16 : VOP3PInst<"v_pk_add_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
105defm V_PK_MUL_LO_U16 : VOP3PInst<"v_pk_mul_lo_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, mul>;
106
107defm V_PK_MIN_I16 : VOP3PInst<"v_pk_min_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smin>;
108defm V_PK_MIN_U16 : VOP3PInst<"v_pk_min_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umin>;
109defm V_PK_MAX_I16 : VOP3PInst<"v_pk_max_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smax>;
110defm V_PK_MAX_U16 : VOP3PInst<"v_pk_max_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umax>;
111
112let SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 in {
113defm V_PK_MAXIMUM_F16 : VOP3PInst<"v_pk_maximum_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fmaximum>;
114defm V_PK_MINIMUM_F16 : VOP3PInst<"v_pk_minimum_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fminimum>;
115} // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0
116}
117
118defm V_PK_SUB_U16 : VOP3PInst<"v_pk_sub_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
119defm V_PK_SUB_I16 : VOP3PInst<"v_pk_sub_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, sub>;
120
121defm V_PK_LSHLREV_B16 : VOP3PInst<"v_pk_lshlrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshl_rev_16>;
122defm V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, cashr_rev_16>;
123defm V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshr_rev_16>;
124} // End isReMaterializable = 1
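// For example, a v2f16 fadd in the input is selected to v_pk_add_f16 above,
// which adds both 16-bit halves of each 32-bit register in a single
// instruction.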
125
126let SubtargetPredicate = HasVOP3PInsts in {
127
128// Integer operations with clamp bit set.
129class VOP3PSatPat<SDPatternOperator pat, Instruction inst> : GCNPat<
130  (pat (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)),
131       (v2i16 (VOP3PMods v2i16:$src1, i32:$src1_modifiers))),
132  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE)
133>;
134
135def : VOP3PSatPat<uaddsat, V_PK_ADD_U16>;
136def : VOP3PSatPat<saddsat, V_PK_ADD_I16>;
137def : VOP3PSatPat<usubsat, V_PK_SUB_U16>;
138def : VOP3PSatPat<ssubsat, V_PK_SUB_I16>;
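// For example, (uaddsat v2i16:$a, v2i16:$b) is selected to V_PK_ADD_U16 with
// DSTCLAMP.ENABLE, so the hardware clamp bit provides the saturation.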
139} // End SubtargetPredicate = HasVOP3PInsts
140
141// TODO: Make sure we're doing the right thing with denormals. Note
142// that FMA and MAD will differ.
143multiclass MadFmaMixPats<SDPatternOperator fma_like,
144                         Instruction mix_inst,
145                         Instruction mixlo_inst,
146                         Instruction mixhi_inst> {
147  // At least one of the operands needs to be an fpextend of an f16
148  // for this to be worthwhile, so we need three patterns here.
149  // TODO: Could we use a predicate to inspect src1/2/3 instead?
150  def : GCNPat <
151    (f32 (fma_like (f32 (VOP3PMadMixModsExt f16:$src0, i32:$src0_mods)),
152                   (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_mods)),
153                   (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_mods)))),
154    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
155              DSTCLAMP.NONE)>;
156  def : GCNPat <
157    (f32 (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_mods)),
158                   (f32 (VOP3PMadMixModsExt f16:$src1, i32:$src1_mods)),
159                   (f32 (VOP3PMadMixMods f32:$src2, i32:$src2_mods)))),
160    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
161              DSTCLAMP.NONE)>;
162  def : GCNPat <
163    (f32 (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_mods)),
164                   (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_mods)),
165                   (f32 (VOP3PMadMixModsExt f16:$src2, i32:$src2_mods)))),
166    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
167              DSTCLAMP.NONE)>;
168
169  def : GCNPat <
170    (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
171                            (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
172                            (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
173    (mixlo_inst $src0_modifiers, $src0,
174                $src1_modifiers, $src1,
175                $src2_modifiers, $src2,
176                DSTCLAMP.NONE,
177                (i32 (IMPLICIT_DEF)))
178  >;
179
  // FIXME: Special case handling for mixhi (especially for clamp) is needed
  // because dealing with the write to the high half of the register is
  // difficult.
183  def : GCNPat <
184    (build_vector f16:$elt0, (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
185                                                     (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
186                                                     (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers)))))),
187    (v2f16 (mixhi_inst $src0_modifiers, $src0,
188                       $src1_modifiers, $src1,
189                       $src2_modifiers, $src2,
190                       DSTCLAMP.NONE,
191                       VGPR_32:$elt0))
192  >;
193
194  def : GCNPat <
195    (build_vector
196      f16:$elt0,
197      (AMDGPUclamp (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
198                                      (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
199                                      (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))))),
200    (v2f16 (mixhi_inst $src0_modifiers, $src0,
201                       $src1_modifiers, $src1,
202                       $src2_modifiers, $src2,
203                       DSTCLAMP.ENABLE,
204                       VGPR_32:$elt0))
205  >;
206
207  def : GCNPat <
208    (AMDGPUclamp (build_vector
209      (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$lo_src0, i32:$lo_src0_modifiers)),
210                         (f32 (VOP3PMadMixMods f16:$lo_src1, i32:$lo_src1_modifiers)),
211                         (f32 (VOP3PMadMixMods f16:$lo_src2, i32:$lo_src2_modifiers))))),
212      (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$hi_src0, i32:$hi_src0_modifiers)),
213                         (f32 (VOP3PMadMixMods f16:$hi_src1, i32:$hi_src1_modifiers)),
214                         (f32 (VOP3PMadMixMods f16:$hi_src2, i32:$hi_src2_modifiers))))))),
215    (v2f16 (mixhi_inst $hi_src0_modifiers, $hi_src0,
216                       $hi_src1_modifiers, $hi_src1,
217                       $hi_src2_modifiers, $hi_src2,
218                       DSTCLAMP.ENABLE,
219                       (mixlo_inst $lo_src0_modifiers, $lo_src0,
220                                   $lo_src1_modifiers, $lo_src1,
221                                   $lo_src2_modifiers, $lo_src2,
222                                   DSTCLAMP.ENABLE,
223                                   (i32 (IMPLICIT_DEF)))))
224  >;
225
226  def : GCNPat <
227    (f16 (fpround (fmul (f32 (VOP3PMadMixMods f32:$src0, i32:$src0_modifiers)),
228                        (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_modifiers))))),
229    (mixlo_inst $src0_modifiers, $src0,
230                $src1_modifiers, $src1,
231                (i32 0), (i32 0),
232                DSTCLAMP.NONE,
233                (i32 (IMPLICIT_DEF)))
234  >;
235
236  def : GCNPat <
237    (build_vector f16:$elt0, (f16 (fpround (fmul (f32 (VOP3PMadMixMods f32:$src0, i32:$src0_modifiers)),
238                                            (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_modifiers)))))),
239    (v2f16 (mixhi_inst $src0_modifiers, $src0,
240                       $src1_modifiers, $src1,
241                       (i32 0), (i32 0),
242                       DSTCLAMP.NONE,
243                       VGPR_32:$elt0))
244  >;
245}
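// Illustrative example (assumed IR, not from this file):
//   %x = fpext half %a to float
//   %y = fpext half %b to float
//   %m = call float @llvm.fma.f32(float %x, float %y, float %c)
//   %r = fptrunc float %m to half
// matches the fpround pattern above and is selected to the mixlo instruction
// (e.g. v_fma_mixlo_f16), with %a and %b marked as f16 sources and %c as f32.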
246
247let SubtargetPredicate = HasMadMixInsts, OtherPredicates = [NoFP32Denormals] in {
248
// These are VOP3a-like opcodes which accept no omod.
// The size of each src argument (16 or 32 bit) is controlled by op_sel_hi.
// For 16-bit src arguments, op_sel controls their location (hi/lo).
252let isCommutable = 1, mayRaiseFPException = 0 in {
253let isReMaterializable = 1 in
254defm V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;
255
256let FPDPRounding = 1 in {
257// Clamp modifier is applied after conversion to f16.
258defm V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
259
260let ClampLo = 0, ClampHi = 1 in {
261defm V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
262}
263} // End FPDPRounding = 1
264}
265
266defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
267} // End SubtargetPredicate = HasMadMixInsts, OtherPredicates = [NoFP32Denormals]
268
269
270// Essentially the same as the mad_mix versions
271let SubtargetPredicate = HasFmaMixInsts in {
272let isCommutable = 1 in {
273
274let isReMaterializable = 1 in
275defm V_FMA_MIX_F32 : VOP3_VOP3PInst<"v_fma_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;
276
277let FPDPRounding = 1 in {
278// Clamp modifier is applied after conversion to f16.
279defm V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
280
281let ClampLo = 0, ClampHi = 1 in {
282defm V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_fma_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
283}
284} // End FPDPRounding = 1
285}
286
287defm : MadFmaMixPats<fma, V_FMA_MIX_F32, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
288}
289
// Defines patterns that extract the signed 4-bit field at bit position Idx[0]
// of a 32-bit value, by shifting left by Idx[1] and then arithmetic shifting
// right by 28.
291foreach Idx = [[0,28],[4,24],[8,20],[12,16],[16,12],[20,8],[24,4]] in
292  def ExtractSigned4bit_#Idx[0] : PatFrag<(ops node:$src),
293                                          (sra (shl node:$src, (i32 Idx[1])), (i32 28))>;
294
// Defines a pattern that extracts an unsigned (U = 1) or signed (U = 0) 4-bit
// or 8-bit field (selected by BitMask) starting at bit FromBitIndex.
296class Extract<int FromBitIndex, int BitMask, bit U>: PatFrag<
297  (ops node:$src),
298  !if (!or (!and (!eq (BitMask, 255), !eq (FromBitIndex, 24)), !eq (FromBitIndex, 28)), // last element
299       !if (U, (srl node:$src, (i32 FromBitIndex)), (sra node:$src, (i32 FromBitIndex))),
300       !if (!eq (FromBitIndex, 0), // first element
301            !if (U, (and node:$src, (i32 BitMask)),
302                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
303                                         (sext_inreg node:$src, i8))),
304            !if (U, (and (srl node:$src, (i32 FromBitIndex)), (i32 BitMask)),
305                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
306                      (sext_inreg (srl node:$src, (i32 FromBitIndex)), i8)))))>;
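// For example, U1_8bit (Extract<8, 255, 1>, defined below) yields
//   (and (srl node:$src, (i32 8)), (i32 255))
// while I1_8bit (Extract<8, 255, 0>) yields
//   (sext_inreg (srl node:$src, (i32 8)), i8).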
307
308
309foreach Type = ["I", "U"] in
310  foreach Index = 0-3 in {
    // Defines a pattern that extracts the Index'th 8-bit element from a
    // 32-bit scalar value.
313    def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !eq (Type, "U")>;
314
    // Defines a multiplication pattern that multiplies the Index'th 8-bit
    // elements of two 32-bit scalar values.
317
318    def Mul#Type#_Elt#Index : PatFrag<
319      (ops node:$src0, node:$src1),
320      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), AMDGPUmul_i24_oneuse, AMDGPUmul_u24_oneuse))
321                            (!cast<Extract>(Type#Index#"_8bit") node:$src0),
322                            (!cast<Extract>(Type#Index#"_8bit") node:$src1))>;
323  }
324
// The different variants of the dot8 patterns cause a huge increase in compile
// time. Define non-associative/non-commutative add/mul nodes to prevent the
// pattern matcher from permuting the dot8 pattern.
328def NonACAdd        : SDNode<"ISD::ADD"       , SDTIntBinOp>;
329def NonACAdd_oneuse : HasOneUseBinOp<NonACAdd>;
330
331def NonACAMDGPUmul_u24        : SDNode<"AMDGPUISD::MUL_U24"       , SDTIntBinOp>;
332def NonACAMDGPUmul_u24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_u24>;
333
334def NonACAMDGPUmul_i24        : SDNode<"AMDGPUISD::MUL_I24"       , SDTIntBinOp>;
335def NonACAMDGPUmul_i24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_i24>;
336
337foreach Type = ["I", "U"] in
338  foreach Index = 0-7 in {
    // Defines a pattern that extracts the Index'th 4-bit element from a
    // 32-bit scalar value.
341    def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !eq (Type, "U")>;
342
    // Defines a multiplication pattern that multiplies the Index'th 4-bit
    // elements of two 32-bit scalar values.
345    def Mul#Type#Index#"_4bit" : PatFrag<
346      (ops node:$src0, node:$src1),
347      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), NonACAMDGPUmul_i24_oneuse, NonACAMDGPUmul_u24_oneuse))
348                             (!cast<Extract>(Type#Index#"_4bit") node:$src0),
349                             (!cast<Extract>(Type#Index#"_4bit") node:$src1))>;
350  }
351
352class UDot2Pat<VOP_Pseudo Inst> : GCNPat <
353  (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
354                                         (srl i32:$src1, (i32 16))), i32:$src2),
355       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
356                             (and i32:$src1, (i32 65535)))
357   ),
358  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
359  let Predicates = Inst.Predicates;
360}
361
362class SDot2Pat<VOP_Pseudo Inst> : GCNPat <
363  (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
364                                         (sra i32:$src1, (i32 16))), i32:$src2),
365       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
366                             (sext_inreg i32:$src1, i16))),
367  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
368  let Predicates = Inst.Predicates;
369}
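// In C-like terms, UDot2Pat matches
//   src2 + ((src0 >> 16) * (src1 >> 16)) + ((src0 & 0xffff) * (src1 & 0xffff))
// computed with 24-bit multiplies, i.e. a two-element dot product plus the
// accumulator, and selects it to the dot2 instruction with the default
// (i32 8) source modifiers and the clamp bit clear.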
370
371let IsDOT = 1 in {
372let OtherPredicates = [HasDot2Insts] in {
373defm V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
374  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_sdot2, 1>;
375defm V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16",
376  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_udot2, 1>;
377} // End OtherPredicates = [HasDot2Insts]
378
379let OtherPredicates = [HasDot10Insts] in
380defm V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
381  VOP3P_Profile<VOP_F32_V2F16_V2F16_F32, VOP3_REGULAR, /*HasDPP*/ 1>,
382  AMDGPUfdot2, 1/*ExplicitClamp*/>;
383
384let OtherPredicates = [HasDot7Insts] in {
385defm V_DOT4_U32_U8  : VOP3PInst<"v_dot4_u32_u8",
386  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
387defm V_DOT8_U32_U4  : VOP3PInst<"v_dot8_u32_u4",
388  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;
389} // End OtherPredicates = [HasDot7Insts]
390
391let OtherPredicates = [HasDot1Insts] in {
392defm V_DOT4_I32_I8  : VOP3PInst<"v_dot4_i32_i8",
393  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
394defm V_DOT8_I32_I4  : VOP3PInst<"v_dot8_i32_i4",
395  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;
396} // End OtherPredicates = [HasDot1Insts]
397
398def DOT2_BF16_Profile
399  : VOP3P_Profile<VOP_F32_V2I16_V2I16_F32, VOP3_REGULAR, /*HasDPP*/ 1> {
400  let HasSrc1Mods = 1;
401}
402
403let SubtargetPredicate = HasDot9Insts  in {
404
405defm V_DOT2_F32_BF16 : VOP3PInst<"v_dot2_f32_bf16", DOT2_BF16_Profile,
406  int_amdgcn_fdot2_f32_bf16, 1>;
407
408} // End SubtargetPredicate = HasDot9Insts
409
410} // End let IsDOT = 1
411
412multiclass VOP3PDOTIUInst <string OpName, SDPatternOperator intrinsic_node> {
413  let IsDOT = 1 in
414  defm NAME : VOP3PInst<OpName, VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>,
415                        null_frag, 1>;
  // Dot-iu instructions consider an input as signed if its neg modifier bits
  // are set. Thus the dot-iu intrinsics have extra operands and require a
  // separate codegen pattern.
418  def : GCNPat < (intrinsic_node (VOP3PModsNeg i32:$src0_mods), i32:$src0,
419                                 (VOP3PModsNeg i32:$src1_mods), i32:$src1,
420                                 i32:$src2, (i1 timm:$clamp)),
421                 (!cast<Instruction>(NAME) $src0_mods, i32:$src0,
422                                           $src1_mods, i32:$src1,
423                                           (i32 8), i32:$src2, i1:$clamp)
424  >;
425}
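// Illustrative example (assumed IR, not from this file): a call such as
//   call i32 @llvm.amdgcn.sudot4(i1 true, i32 %a, i1 false, i32 %b, i32 %c, i1 false)
// marks %a as signed and %b as unsigned; VOP3PModsNeg turns each i1 sign
// operand into the corresponding neg (neg_lo) source-modifier bit of the
// selected V_DOT4_I32_IU8.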
426
427let SubtargetPredicate = HasDot8Insts  in {
428defm V_DOT4_I32_IU8 : VOP3PDOTIUInst<"v_dot4_i32_iu8", int_amdgcn_sudot4>;
429defm V_DOT8_I32_IU4 : VOP3PDOTIUInst<"v_dot8_i32_iu4", int_amdgcn_sudot8>;
430
431def : GCNPat < (int_amdgcn_sdot8 i32:$src0,
432                                 i32:$src1,
433                                 i32:$src2, (i1 timm:$clamp)),
434               (V_DOT8_I32_IU4  (i32 9), i32:$src0,
435                                (i32 9), i32:$src1, (i32 8), i32:$src2, i1:$clamp)
436>;
437
438def : GCNPat < (int_amdgcn_sdot4 i32:$src0,
439                                 i32:$src1,
440                                 i32:$src2, (i1 timm:$clamp)),
441               (V_DOT4_I32_IU8  (i32 9), i32:$src0,
442                                (i32 9), i32:$src1, (i32 8), i32:$src2, i1:$clamp)
443>;
444} // End SubtargetPredicate = HasDot8Insts
445
// Does not use op_sel and has no src_modifiers on src0 and src1.
// The src_modifiers on src2 (f32) are f32 fneg (neg_lo[2]) and f32 fabs (neg_hi[2]).
448def VOP3P_DOTF8_Profile : VOP3P_Profile<VOPProfile <[f32, i32, i32, f32]>,
449                                        VOP3_PACKED, 1> {
450  let HasClamp = 0;
451  let HasOpSel = 0;
452  let HasOMod = 0;
453  let IsDOT = 1;
454  let HasSrc0Mods = 0;
455  let HasSrc1Mods = 0;
456  let HasSrc2Mods = 1;
457
458  let InsVOP3P = (ins VSrc_b32:$src0, VSrc_b32:$src1,
459                      PackedF16InputMods:$src2_modifiers, VSrc_f32:$src2,
460                      neg_lo0:$neg_lo, neg_hi0:$neg_hi);
461
462  let InsVOP3DPP8 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1,
463                         PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2,
464                         neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp8:$dpp8, FI:$fi);
465
466  let InsVOP3DPP16 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1,
467                          PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2,
468                          neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp_ctrl:$dpp_ctrl,
469                          row_mask:$row_mask, bank_mask:$bank_mask,
470                          bound_ctrl:$bound_ctrl, FI:$fi);
471}
472
473multiclass VOP3PDOTF8Inst <string OpName, SDPatternOperator intrinsic_node> {
474  defm NAME : VOP3PInst<OpName, VOP3P_DOTF8_Profile, null_frag, 1>;
475
476  let SubtargetPredicate = isGFX12Plus in
477  def : GCNPat <(intrinsic_node i32:$src0, i32:$src1,
478                                (VOP3Mods f32:$src2, i32:$src2_modifiers)),
479                (!cast<Instruction>(NAME) i32:$src0, i32:$src1,
480                                          i32:$src2_modifiers, f32:$src2)>;
481}
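// Illustrative example (assumed IR, not from this file): a call such as
//   call float @llvm.amdgcn.dot4.f32.fp8.bf8(i32 %a, i32 %b, float %c)
// passes four packed 8-bit float values in each of %a and %b; per the profile
// above, only the f32 accumulator %c accepts fneg/fabs (neg_lo[2]/neg_hi[2]).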
482
483defm V_DOT4_F32_FP8_BF8 : VOP3PDOTF8Inst<"v_dot4_f32_fp8_bf8", int_amdgcn_dot4_f32_fp8_bf8>;
484defm V_DOT4_F32_BF8_FP8 : VOP3PDOTF8Inst<"v_dot4_f32_bf8_fp8", int_amdgcn_dot4_f32_bf8_fp8>;
485defm V_DOT4_F32_FP8_FP8 : VOP3PDOTF8Inst<"v_dot4_f32_fp8_fp8", int_amdgcn_dot4_f32_fp8_fp8>;
486defm V_DOT4_F32_BF8_BF8 : VOP3PDOTF8Inst<"v_dot4_f32_bf8_bf8", int_amdgcn_dot4_f32_bf8_bf8>;
487
488def : UDot2Pat<V_DOT2_U32_U16>;
489def : SDot2Pat<V_DOT2_I32_I16>;
490
491foreach Type = ["U", "I"] in
492  let Predicates = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).Predicates in
493  def : GCNPat <
494    !cast<dag>(!foldl((i32 i32:$src2), [0, 1, 2, 3], lhs, y,
495                      (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
496    (!cast<VOP3P_Pseudo>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
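// The !foldl above unrolls, for the unsigned case, to
//   (add_oneuse (add_oneuse (add_oneuse (add_oneuse i32:$src2, MulU_Elt0),
//               MulU_Elt1), MulU_Elt2), MulU_Elt3)
// i.e. src2 plus the four byte-wise products, which is selected to
// v_dot4_u32_u8.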
497
498foreach Type = ["U", "I"] in
499  let Predicates = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).Predicates in
500  def : GCNPat <
501    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
502                      [1, 2, 3, 4, 5, 6, 7], lhs, y,
503                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
504    (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
505
// The other permutations of the dot8 codegen DAG patterns are not generated
// through TableGen because of the huge increase in compile time. Directly
// handle the pattern produced by the frontend here.
508foreach Type = ["U", "I"] in
509  let Predicates = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).Predicates in
510  def : GCNPat <
511    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
512                      [7, 1, 2, 3, 4, 5, 6], lhs, y,
513                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
514    (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
515
516def ADst_32   : VOPDstOperand<AGPR_32>;
517def ADst_64   : VOPDstOperand<AReg_64>;
518def ADst_128  : VOPDstOperand<AReg_128>;
519def ADst_256  : VOPDstOperand<AReg_256>;
520def ADst_512  : VOPDstOperand<AReg_512>;
521def ADst_1024 : VOPDstOperand<AReg_1024>;
522def VDst_64   : VOPDstOperand<VReg_64>;
523def VDst_128  : VOPDstOperand<VReg_128>;
524def VDst_256  : VOPDstOperand<VReg_256>;
525def VDst_512  : VOPDstOperand<VReg_512>;
526def VDst_1024 : VOPDstOperand<VReg_1024>;
527
528def VOPProfileAccRead : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> {
529  let Src0RC64 = ARegSrc_32;
530}
531
532def VOPProfileAccWrite : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> {
533  let DstRC = ADst_32;
534  let Src0RC64 = VCSrc_b32;
535}
536
537class VOPProfileMAI<VOPProfile P, RegisterOperand _SrcRC, RegisterOperand _DstRC,
538                    RegisterOperand SrcABRC = AVSrc_32>
539  : VOP3P_Profile<P, VOP3_MAI> {
540  let DstRC = _DstRC;
541  let Src0RC64 = SrcABRC;
542  let Src1RC64 = SrcABRC;
543  let Src2RC64 = _SrcRC;
544  let HasOpSel = 0;
545  let HasClamp = 0;
546  let HasIntClamp = 0;
547  let HasOMod = 0;
548  let HasModifiers = 0;
549  let AsmVOP3Base = "$vdst, $src0, $src1, $src2$cbsz$abid$blgp";
550  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, cbsz:$cbsz, abid:$abid, blgp:$blgp);
551  let InsVOP3Base = Ins64;
  // Dst and SrcC cannot partially overlap if SrcC/Dst is bigger than 4 VGPRs.
  // We therefore create two versions of the instruction: one with dst tied to
  // src2 and one with the earlyclobber flag on the dst. This is stricter than
  // the actual HW restriction; in particular, earlyclobber also constrains the
  // src0 and src1 allocation, which is not actually required.
557  bit NoDstOverlap = !gt(DstVT.Size, 128);
558}
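// For example, the X4 profiles below (128-bit v4f32/v4i32 destinations) keep
// NoDstOverlap = 0, while profiles with destinations wider than 128 bits (the
// 256-, 512- and 1024-bit accumulators) set NoDstOverlap = 1 and so also get
// the tied "_mac" variants defined in the MAIInst multiclass further down.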
559
560class VOPProfileSMFMAC<VOPProfile P, RegisterOperand _DstRC,
561                       RegisterOperand _SrcARC, RegisterOperand _SrcBRC>
562  : VOPProfileMAI<P, _DstRC, _DstRC, _SrcARC> {
563  let Src1RC64 = _SrcBRC;
564  let Src2VT = DstVT;
565  let Asm64 = " $vdst, $src0, $src1, $idx$cbsz$abid";
566  let Outs64 = (outs DstRC:$vdst);
567  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, VRegSrc_32:$idx, cbsz:$cbsz, abid:$abid, Src2RC64:$src2);
568}
569
570def VOPProfileMAI_F32_F32_X4    : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32,       AISrc_128_f32,  ADst_128>;
571def VOPProfileMAI_F32_F32_X16   : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32,     AISrc_512_f32,  ADst_512>;
572def VOPProfileMAI_F32_F32_X32   : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32,     AISrc_1024_f32, ADst_1024>;
573def VOPProfileMAI_I32_I32_X4    : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32,       AISrc_128_b32,  ADst_128>;
574def VOPProfileMAI_I32_I32_X16   : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32,     AISrc_512_b32,  ADst_512>;
575def VOPProfileMAI_I32_I32_X32   : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32,     AISrc_1024_b32, ADst_1024>;
576def VOPProfileMAI_F32_V2I16_X4  : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32,   AISrc_128_b32,  ADst_128>;
577def VOPProfileMAI_F32_V2I16_X16 : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, AISrc_512_b32,  ADst_512>;
578def VOPProfileMAI_F32_V2I16_X32 : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, AISrc_1024_b32, ADst_1024>;
579def VOPProfileMAI_F32_V4F16_X4  : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32,   AISrc_128_b32,  ADst_128,  AVSrc_64>;
580def VOPProfileMAI_F32_V4F16_X16 : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, AISrc_512_b32,  ADst_512,  AVSrc_64>;
581def VOPProfileMAI_F32_V4F16_X32 : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;
582def VOPProfileMAI_F32_V4I16_X4  : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32,   AISrc_128_b32,  ADst_128,  AVSrc_64>;
583def VOPProfileMAI_F32_V4I16_X16 : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, AISrc_512_b32,  ADst_512,  AVSrc_64>;
584def VOPProfileMAI_F32_V4I16_X32 : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;
585def VOPProfileMAI_F64_16X16X4F64 : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64,      AISrc_256_f64,  ADst_256,  AVSrc_64>;
586def VOPProfileMAI_F64_4X4X4F64   : VOPProfileMAI<VOP_F64_F64_F64_F64,          AISrc_64_f64,   ADst_64,   AVSrc_64>;
587def VOPProfileMAI_I32_I64_X16   : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32,       AISrc_128_b32,  ADst_128,  AVSrc_64>;
588def VOPProfileMAI_I32_I64_X32   : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32,     AISrc_512_b32,  ADst_512,  AVSrc_64>;
589def VOPProfileMAI_F32_V2F32_X16 : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32,   AISrc_128_b32,  ADst_128,  AVSrc_64>;
590def VOPProfileMAI_F32_V2F32_X32 : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, AISrc_512_b32,  ADst_512,  AVSrc_64>;
591def VOPProfileMAI_F32_I64_X32   : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32,       AISrc_128_b32,  ADst_128,  AVSrc_64>;
592def VOPProfileMAI_F32_I64_X16   : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32,     AISrc_512_b32,  ADst_512,  AVSrc_64>;
593
594def VOPProfileMAI_F32_F32_X4_VCD     : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32,       VISrc_128_f32,  VDst_128>;
595def VOPProfileMAI_F32_F32_X16_VCD    : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32,     VISrc_512_f32,  VDst_512>;
596def VOPProfileMAI_F32_F32_X32_VCD    : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32,     VISrc_1024_f32, VDst_1024>;
597def VOPProfileMAI_I32_I32_X4_VCD     : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32,       VISrc_128_b32,  VDst_128>;
598def VOPProfileMAI_I32_I32_X16_VCD    : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32,     VISrc_512_b32,  VDst_512>;
599def VOPProfileMAI_I32_I32_X32_VCD    : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32,     VISrc_1024_b32, VDst_1024>;
600def VOPProfileMAI_F32_V2I16_X4_VCD   : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32,   VISrc_128_b32,  VDst_128>;
601def VOPProfileMAI_F32_V2I16_X16_VCD  : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, VISrc_512_b32,  VDst_512>;
602def VOPProfileMAI_F32_V2I16_X32_VCD  : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, VISrc_1024_b32, VDst_1024>;
603def VOPProfileMAI_F32_V4F16_X4_VCD   : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32,   VISrc_128_b32,  VDst_128,  AVSrc_64>;
604def VOPProfileMAI_F32_V4F16_X16_VCD  : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, VISrc_512_b32,  VDst_512,  AVSrc_64>;
605def VOPProfileMAI_F32_V4F16_X32_VCD  : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>;
606def VOPProfileMAI_F32_V4I16_X4_VCD   : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32,   VISrc_128_b32,  VDst_128,  AVSrc_64>;
607def VOPProfileMAI_F32_V4I16_X16_VCD  : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, VISrc_512_b32,  VDst_512,  AVSrc_64>;
608def VOPProfileMAI_F32_V4I16_X32_VCD  : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>;
609def VOPProfileMAI_F64_16X16X4F64_VCD : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64,       VISrc_256_f64,  VDst_256,  AVSrc_64>;
610def VOPProfileMAI_F64_4X4X4F64_VCD   : VOPProfileMAI<VOP_F64_F64_F64_F64,           VISrc_64_f64,   VDst_64,   AVSrc_64>;
611def VOPProfileMAI_I32_I64_X16_VCD    : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32,       VISrc_128_b32,  VDst_128,  AVSrc_64>;
612def VOPProfileMAI_I32_I64_X32_VCD    : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32,     VISrc_512_b32,  VDst_512,  AVSrc_64>;
613def VOPProfileMAI_F32_V2F32_X16_VCD  : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32,   VISrc_128_b32,  VDst_128,  AVSrc_64>;
614def VOPProfileMAI_F32_V2F32_X32_VCD  : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, VISrc_512_b32,  VDst_512,  AVSrc_64>;
615def VOPProfileMAI_F32_I64_X32_VCD    : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32,       VISrc_128_b32,  VDst_128,  AVSrc_64>;
616def VOPProfileMAI_F32_I64_X16_VCD    : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32,     VISrc_512_b32,  VDst_512,  AVSrc_64>;
617
618def VOPProfileSMFMAC_F32_16X16X32_F16 : VOPProfileSMFMAC<VOP_V4F32_V4F16_V8F16_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
619def VOPProfileSMFMAC_F32_32X32X16_F16 : VOPProfileSMFMAC<VOP_V16F32_V4F16_V8F16_I32, AVDst_512, AVSrc_64, AVSrc_128>;
620def VOPProfileSMFMAC_F32_16X16X32_I16 : VOPProfileSMFMAC<VOP_V4F32_V4I16_V8I16_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
621def VOPProfileSMFMAC_F32_32X32X16_I16 : VOPProfileSMFMAC<VOP_V16F32_V4I16_V8I16_I32, AVDst_512, AVSrc_64, AVSrc_128>;
622def VOPProfileSMFMAC_I32_16X16X64_I8  : VOPProfileSMFMAC<VOP_V4I32_V2I32_V4I32_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
623def VOPProfileSMFMAC_I32_32X32X32_I8  : VOPProfileSMFMAC<VOP_V16I32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>;
624def VOPProfileSMFMAC_F32_16X16X64_F8  : VOPProfileSMFMAC<VOP_V4F32_V2I32_V4I32_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
625def VOPProfileSMFMAC_F32_32X32X32_F8  : VOPProfileSMFMAC<VOP_V16F32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>;
626
627class MFMATable <bit is_mac, string Name> {
628  bit IsMac = is_mac;
629  string FMAOp = Name;
630}
631
632class MAIFrag<SDPatternOperator Op, code pred> : PatFrag <
633  (ops node:$src0, node:$src1, node:$src2, node:$cbsz, node:$abid, node:$blgp),
634  (Op $src0, $src1, $src2, $cbsz, $abid, $blgp),
635  pred
636>;
637
638defvar MayNeedAGPRs = [{
639  return MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
640}];
641
642defvar MayNeedAGPRs_gisel = [{
643  return MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
644}];
645
646defvar MayNotNeedAGPRs = [{
647  return !MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
648}];
649
650defvar MayNotNeedAGPRs_gisel = [{
651  return !MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
652}];
653
654class AgprMAIFrag<SDPatternOperator Op> : MAIFrag<Op, MayNeedAGPRs> {
655  let GISelPredicateCode = MayNeedAGPRs_gisel;
656}
657
658class VgprMAIFrag<SDPatternOperator Op> : MAIFrag<Op, MayNotNeedAGPRs> {
659  let GISelPredicateCode = MayNotNeedAGPRs_gisel;
660}
661
662let SubtargetPredicate = HasMAIInsts in {
663
664let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
665  defm V_ACCVGPR_READ_B32  : VOP3Inst<"v_accvgpr_read_b32",  VOPProfileAccRead>;
666  let isMoveImm = 1 in {
667    defm V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite>;
668  } // End isMoveImm = 1
669} // End isAsCheapAsAMove = 1, isReMaterializable = 1
670
671class MAIInst<string OpName, VOPProfile P, SDPatternOperator node>
672  : VOP3InstBase<OpName, P, node> {
673  Instruction Opcode = !cast<Instruction>(NAME);
674  bit is_dgemm = 0;
675  bit is_gfx940_xdl = 0;
676}
677
678multiclass MAIInst<string OpName, string P, SDPatternOperator node,
679                   bit NoDstOverlap = !cast<VOPProfileMAI>("VOPProfileMAI_" # P).NoDstOverlap> {
680  let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
681    // FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
682    let Constraints = !if(NoDstOverlap, "@earlyclobber $vdst", "") in {
683      def _e64 : MAIInst<OpName, !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
684                         !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, AgprMAIFrag<node>)>,
685                 MFMATable<0, NAME # "_e64">;
686
687      let SubtargetPredicate = isGFX90APlus, Mnemonic = OpName in
688      def _vgprcd_e64 : MAIInst<OpName # "_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
689                                !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, VgprMAIFrag<node>)>,
690                        MFMATable<0, NAME # "_vgprcd_e64">;
691    }
692
693    if NoDstOverlap then {
694      let Constraints = !if(NoDstOverlap, "$vdst = $src2", ""),
695          isConvertibleToThreeAddress = NoDstOverlap,
696          Mnemonic = OpName in {
697        def "_mac_e64" : MAIInst<OpName # "_mac", !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
698                                 !if(!eq(node, null_frag), null_frag, AgprMAIFrag<node>)>,
699                         MFMATable<1, NAME # "_e64">;
700
701        let SubtargetPredicate = isGFX90APlus in
702        def _mac_vgprcd_e64 : MAIInst<OpName # "_mac_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
703                                      !if(!eq(node, null_frag), null_frag, VgprMAIFrag<node>)>,
704                              MFMATable<1, NAME # "_vgprcd_e64">;
705      }
706    }
707  } // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1
708}
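// Illustrative example (assumed IR, not from this file): a call such as
//   call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float %a, float %b,
//                       <4 x float> %c, i32 0, i32 0, i32 0)
// is selected to V_MFMA_F32_4X4X1F32_e64 (AGPR dst/src2) when the function may
// need AGPRs, and to V_MFMA_F32_4X4X1F32_vgprcd_e64 (VGPR dst/src2) on
// gfx90a+ otherwise, per the AgprMAIFrag/VgprMAIFrag predicates above.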
709
710defm V_MFMA_F32_4X4X1F32    : MAIInst<"v_mfma_f32_4x4x1f32",    "F32_F32_X4",    int_amdgcn_mfma_f32_4x4x1f32>;
711defm V_MFMA_F32_16X16X1F32  : MAIInst<"v_mfma_f32_16x16x1f32",  "F32_F32_X16",   int_amdgcn_mfma_f32_16x16x1f32>;
712defm V_MFMA_F32_16X16X4F32  : MAIInst<"v_mfma_f32_16x16x4f32",  "F32_F32_X4",    int_amdgcn_mfma_f32_16x16x4f32>;
713defm V_MFMA_F32_32X32X1F32  : MAIInst<"v_mfma_f32_32x32x1f32",  "F32_F32_X32",   int_amdgcn_mfma_f32_32x32x1f32>;
714defm V_MFMA_F32_32X32X2F32  : MAIInst<"v_mfma_f32_32x32x2f32",  "F32_F32_X16",   int_amdgcn_mfma_f32_32x32x2f32>;
715
716let is_gfx940_xdl = 1 in {
717defm V_MFMA_F32_4X4X4F16    : MAIInst<"v_mfma_f32_4x4x4f16",    "F32_V4F16_X4",  int_amdgcn_mfma_f32_4x4x4f16>;
718defm V_MFMA_I32_4X4X4I8     : MAIInst<"v_mfma_i32_4x4x4i8",     "I32_I32_X4",    int_amdgcn_mfma_i32_4x4x4i8>;
719defm V_MFMA_F32_16X16X4F16  : MAIInst<"v_mfma_f32_16x16x4f16",  "F32_V4F16_X16", int_amdgcn_mfma_f32_16x16x4f16>;
720defm V_MFMA_F32_16X16X16F16 : MAIInst<"v_mfma_f32_16x16x16f16", "F32_V4F16_X4",  int_amdgcn_mfma_f32_16x16x16f16>;
721defm V_MFMA_I32_16X16X4I8   : MAIInst<"v_mfma_i32_16x16x4i8",   "I32_I32_X16",   int_amdgcn_mfma_i32_16x16x4i8>;
722defm V_MFMA_F32_32X32X4F16  : MAIInst<"v_mfma_f32_32x32x4f16",  "F32_V4F16_X32", int_amdgcn_mfma_f32_32x32x4f16>;
723defm V_MFMA_F32_32X32X8F16  : MAIInst<"v_mfma_f32_32x32x8f16",  "F32_V4F16_X16", int_amdgcn_mfma_f32_32x32x8f16>;
724defm V_MFMA_I32_32X32X4I8   : MAIInst<"v_mfma_i32_32x32x4i8",   "I32_I32_X32",   int_amdgcn_mfma_i32_32x32x4i8>;
725}
726
727let Predicates = [isGFX908orGFX90A] in {
728defm V_MFMA_I32_16X16X16I8  : MAIInst<"v_mfma_i32_16x16x16i8",  "I32_I32_X4",    int_amdgcn_mfma_i32_16x16x16i8>;
729defm V_MFMA_I32_32X32X8I8   : MAIInst<"v_mfma_i32_32x32x8i8",   "I32_I32_X16",   int_amdgcn_mfma_i32_32x32x8i8>;
730defm V_MFMA_F32_4X4X2BF16   : MAIInst<"v_mfma_f32_4x4x2bf16",   "F32_V2I16_X4",  int_amdgcn_mfma_f32_4x4x2bf16>;
731defm V_MFMA_F32_16X16X2BF16 : MAIInst<"v_mfma_f32_16x16x2bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_16x16x2bf16>;
732defm V_MFMA_F32_16X16X8BF16 : MAIInst<"v_mfma_f32_16x16x8bf16", "F32_V2I16_X4",  int_amdgcn_mfma_f32_16x16x8bf16>;
733defm V_MFMA_F32_32X32X2BF16 : MAIInst<"v_mfma_f32_32x32x2bf16", "F32_V2I16_X32", int_amdgcn_mfma_f32_32x32x2bf16>;
734defm V_MFMA_F32_32X32X4BF16 : MAIInst<"v_mfma_f32_32x32x4bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_32x32x4bf16>;
735}
736
737} // End SubtargetPredicate = HasMAIInsts
738
739let Predicates = [isGFX90APlus] in {
740  let is_gfx940_xdl = 1 in {
741  defm V_MFMA_F32_32X32X4BF16_1K  : MAIInst<"v_mfma_f32_32x32x4bf16_1k",  "F32_V4I16_X32",  int_amdgcn_mfma_f32_32x32x4bf16_1k>;
742  defm V_MFMA_F32_16X16X4BF16_1K  : MAIInst<"v_mfma_f32_16x16x4bf16_1k",  "F32_V4I16_X16",  int_amdgcn_mfma_f32_16x16x4bf16_1k>;
743  defm V_MFMA_F32_4X4X4BF16_1K    : MAIInst<"v_mfma_f32_4x4x4bf16_1k",    "F32_V4I16_X4",   int_amdgcn_mfma_f32_4x4x4bf16_1k>;
744  defm V_MFMA_F32_32X32X8BF16_1K  : MAIInst<"v_mfma_f32_32x32x8bf16_1k",  "F32_V4I16_X16",  int_amdgcn_mfma_f32_32x32x8bf16_1k>;
745  defm V_MFMA_F32_16X16X16BF16_1K : MAIInst<"v_mfma_f32_16x16x16bf16_1k", "F32_V4I16_X4",   int_amdgcn_mfma_f32_16x16x16bf16_1k>;
746  }
747
748  let is_dgemm = 1 in {
749  defm V_MFMA_F64_16X16X4F64      : MAIInst<"v_mfma_f64_16x16x4f64",      "F64_16X16X4F64", int_amdgcn_mfma_f64_16x16x4f64>;
750  defm V_MFMA_F64_4X4X4F64        : MAIInst<"v_mfma_f64_4x4x4f64",        "F64_4X4X4F64",   int_amdgcn_mfma_f64_4x4x4f64>;
751  }
752} // End Predicates = [isGFX90APlus]
753
754let SubtargetPredicate = isGFX940Plus, is_gfx940_xdl = 1 in {
755  defm V_MFMA_I32_32X32X16I8       : MAIInst<"v_mfma_i32_32x32x16i8",       "I32_I64_X32",    int_amdgcn_mfma_i32_32x32x16_i8>;
756  defm V_MFMA_I32_16X16X32I8       : MAIInst<"v_mfma_i32_16x16x32i8",       "I32_I64_X16",    int_amdgcn_mfma_i32_16x16x32_i8>;
757  defm V_MFMA_F32_16X16X8XF32      : MAIInst<"v_mfma_f32_16x16x8xf32",      "F32_V2F32_X16",  int_amdgcn_mfma_f32_16x16x8_xf32>;
758  defm V_MFMA_F32_32X32X4XF32      : MAIInst<"v_mfma_f32_32x32x4xf32",      "F32_V2F32_X32",  int_amdgcn_mfma_f32_32x32x4_xf32>;
759  defm V_MFMA_F32_16X16X32_BF8_BF8 : MAIInst<"v_mfma_f32_16x16x32_bf8_bf8", "F32_I64_X32",    int_amdgcn_mfma_f32_16x16x32_bf8_bf8>;
760  defm V_MFMA_F32_16X16X32_BF8_FP8 : MAIInst<"v_mfma_f32_16x16x32_bf8_fp8", "F32_I64_X32",    int_amdgcn_mfma_f32_16x16x32_bf8_fp8>;
761  defm V_MFMA_F32_16X16X32_FP8_BF8 : MAIInst<"v_mfma_f32_16x16x32_fp8_bf8", "F32_I64_X32",    int_amdgcn_mfma_f32_16x16x32_fp8_bf8>;
762  defm V_MFMA_F32_16X16X32_FP8_FP8 : MAIInst<"v_mfma_f32_16x16x32_fp8_fp8", "F32_I64_X32",    int_amdgcn_mfma_f32_16x16x32_fp8_fp8>;
763  defm V_MFMA_F32_32X32X16_BF8_BF8 : MAIInst<"v_mfma_f32_32x32x16_bf8_bf8", "F32_I64_X16",    int_amdgcn_mfma_f32_32x32x16_bf8_bf8>;
764  defm V_MFMA_F32_32X32X16_BF8_FP8 : MAIInst<"v_mfma_f32_32x32x16_bf8_fp8", "F32_I64_X16",    int_amdgcn_mfma_f32_32x32x16_bf8_fp8>;
765  defm V_MFMA_F32_32X32X16_FP8_BF8 : MAIInst<"v_mfma_f32_32x32x16_fp8_bf8", "F32_I64_X16",    int_amdgcn_mfma_f32_32x32x16_fp8_bf8>;
766  defm V_MFMA_F32_32X32X16_FP8_FP8 : MAIInst<"v_mfma_f32_32x32x16_fp8_fp8", "F32_I64_X16",    int_amdgcn_mfma_f32_32x32x16_fp8_fp8>;
767} // End SubtargetPredicate = isGFX940Plus, is_gfx940_xdl = 1
768
769multiclass SMFMACInst<string OpName, string P, SDPatternOperator node> {
770  let Constraints = "$vdst = $src2", DisableEncoding = "$src2",
771      isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1, is_gfx940_xdl = 1 in {
772    def _e64 : MAIInst<OpName, !cast<VOPProfileSMFMAC>("VOPProfileSMFMAC_" # P), node>;
773  }
774}
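// Note on the SMFMAC (sparse MFMA) forms: per VOPProfileSMFMAC above, they take
// an extra 32-bit register operand $idx, which carries the sparsity index data
// for the compressed A operand, and the accumulator is tied to the destination
// ($vdst = $src2), with $src2 excluded from the encoding.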
775
776let SubtargetPredicate = isGFX940Plus in {
777defm V_SMFMAC_F32_16X16X32_F16     : SMFMACInst<"v_smfmac_f32_16x16x32_f16",     "F32_16X16X32_F16", int_amdgcn_smfmac_f32_16x16x32_f16>;
778defm V_SMFMAC_F32_32X32X16_F16     : SMFMACInst<"v_smfmac_f32_32x32x16_f16",     "F32_32X32X16_F16", int_amdgcn_smfmac_f32_32x32x16_f16>;
779defm V_SMFMAC_F32_16X16X32_BF16    : SMFMACInst<"v_smfmac_f32_16x16x32_bf16",    "F32_16X16X32_I16", int_amdgcn_smfmac_f32_16x16x32_bf16>;
780defm V_SMFMAC_F32_32X32X16_BF16    : SMFMACInst<"v_smfmac_f32_32x32x16_bf16",    "F32_32X32X16_I16", int_amdgcn_smfmac_f32_32x32x16_bf16>;
781defm V_SMFMAC_I32_16X16X64_I8      : SMFMACInst<"v_smfmac_i32_16x16x64_i8",      "I32_16X16X64_I8",  int_amdgcn_smfmac_i32_16x16x64_i8>;
782defm V_SMFMAC_I32_32X32X32_I8      : SMFMACInst<"v_smfmac_i32_32x32x32_i8",      "I32_32X32X32_I8",  int_amdgcn_smfmac_i32_32x32x32_i8>;
783defm V_SMFMAC_F32_16X16X64_BF8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_bf8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_bf8_bf8>;
784defm V_SMFMAC_F32_16X16X64_BF8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_fp8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_bf8_fp8>;
785defm V_SMFMAC_F32_16X16X64_FP8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_bf8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_fp8_bf8>;
786defm V_SMFMAC_F32_16X16X64_FP8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_fp8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_fp8_fp8>;
787defm V_SMFMAC_F32_32X32X32_BF8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_bf8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_bf8_bf8>;
788defm V_SMFMAC_F32_32X32X32_BF8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_fp8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_bf8_fp8>;
789defm V_SMFMAC_F32_32X32X32_FP8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_bf8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_fp8_bf8>;
790defm V_SMFMAC_F32_32X32X32_FP8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_fp8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_fp8_fp8>;
791}
792
793def MAIInstInfoTable : GenericTable {
794  let FilterClass = "MAIInst";
795  let CppTypeName = "MAIInstInfo";
796  let Fields = [
797    "Opcode", "is_dgemm", "is_gfx940_xdl"
798  ];
799
800  let PrimaryKey = ["Opcode"];
801  let PrimaryKeyName = "getMAIInstInfoHelper";
802}
803
804let isCommutable = 1, isReMaterializable = 1 in {
805  let SubtargetPredicate = HasPackedFP32Ops in {
806    defm V_PK_FMA_F32 : VOP3PInst<"v_pk_fma_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fma>;
807    defm V_PK_MUL_F32 : VOP3PInst<"v_pk_mul_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fmul>;
808    defm V_PK_ADD_F32 : VOP3PInst<"v_pk_add_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fadd>;
809  } // End SubtargetPredicate = HasPackedFP32Ops
810
811  let SubtargetPredicate = HasPkMovB32 in
812  defm V_PK_MOV_B32 : VOP3PInst<"v_pk_mov_b32", VOP3P_Profile<VOP_V2I32_V2I32_V2I32, VOP3_PACKED>>;
813} // End isCommutable = 1, isReMaterializable = 1
814
815def : MnemonicAlias<"v_accvgpr_read",  "v_accvgpr_read_b32">;
816def : MnemonicAlias<"v_accvgpr_write", "v_accvgpr_write_b32">;
817
818class VOPProfileWMMA<VOPProfile P, string Suffix, RegisterOperand _Src01RC64, bit _HasClamp, bit _HasOpSel> : VOP3P_Profile<P> {
819  let DstRC = !if(!eq(Suffix, "_w32"), VDst_256, VDst_128);
820  let Src0RC64 = _Src01RC64;
821  let Src1RC64 = _Src01RC64;
822  let Src2RC64 = !if(!eq(Suffix, "_w32"), VISrc_256_f64, VISrc_128_f32);
823  let HasClamp = _HasClamp;
824  let HasOpSel = _HasOpSel;
825  let IsPacked = 1;
826  let IsWMMA = 1;
827}
828
829def VOP_V8F32_V16F16_V16F16_V8F32 : VOPProfile <[v8f32, v16f16, v16f16, v8f32]>;
830def VOP_V8F32_V16I16_V16I16_V8F32 : VOPProfile <[v8f32, v16i16, v16i16, v8f32]>;
831def VOP_V16F16_V16F16_V16F16_V16F16 : VOPProfile <[v16f16, v16f16, v16f16, v16f16]>;
832def VOP_V16I16_V16I16_V16I16_V16I16 : VOPProfile <[v16i16, v16i16, v16i16, v16i16]>;
833def VOP_V8I32_V4I32_V4I32_V8I32 : VOPProfile <[v8i32, v4i32, v4i32, v8i32]>;
834def VOP_V8I32_V2I32_V2I32_V8I32 : VOPProfile <[v8i32, v2i32, v2i32, v8i32]>;
835
836def VOP_V4F32_V16F16_V16F16_V4F32 : VOPProfile <[v4f32, v16f16, v16f16, v4f32]>;
837def VOP_V4F32_V16I16_V16I16_V4F32 : VOPProfile <[v4f32, v16i16, v16i16, v4f32]>;
838def VOP_V8F16_V16F16_V16F16_V8F16 : VOPProfile <[v8f16, v16f16, v16f16, v8f16]>;
839def VOP_V8I16_V16I16_V16I16_V8I16 : VOPProfile <[v8i16, v16i16, v16i16, v8i16]>;
840def VOP_V4I32_V4I32_V4I32_V4I32 : VOPProfile <[v4i32, v4i32, v4i32, v4i32]>;
841def VOP_V4I32_V2I32_V2I32_V4I32 : VOPProfile <[v4i32, v2i32, v2i32, v4i32]>;
842
843
844class WMMAType <bits<2> val> {
845  bit hasClamp = val{0};
846  bit hasOpsel = val{1};
847}
848
849def WMMARegular      : WMMAType<0b00>;
850def WMMAUIClamp      : WMMAType<0b01>;
851def WMMAOpSel        : WMMAType<0b10>;
852
853class WMMARegularPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
854  GCNPat < (P.DstVT (node
855                                (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)),
856                                (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)),
857                                (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers))
858                   )),
859                   (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, $src2_modifiers, P.Src2VT:$src2))
860>;
861
862class WMMAOpSelPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
863  GCNPat < (P.DstVT (node
864                                (P.Src0VT P.Src0VT:$src0),
865                                (P.Src1VT P.Src1VT:$src1),
866                                (P.Src2VT P.Src2VT:$src2), (WMMAOpSelVOP3PMods i32:$src2_modifiers)
867                   )),
868                   (P.DstVT (Inst (i32 8), P.Src0VT:$src0, (i32 8), P.Src1VT:$src1, i32:$src2_modifiers, P.Src2VT:$src2))
869>;
870
871class WMMAUIClampPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
872  GCNPat < (P.DstVT (node
873                                (VOP3PModsNeg i32:$src0_modifiers), (P.Src0VT P.Src0VT:$src0),
874                                (VOP3PModsNeg i32:$src1_modifiers), (P.Src1VT P.Src1VT:$src1),
875                                (P.Src2VT P.Src2VT:$src2), (i1 timm:$clamp)
876                   )),
877                   (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, (i32 8), P.Src2VT:$src2, i1:$clamp))
878>;
879
880class WMMAOpcodeMapping<Instruction TwoAddr, Instruction ThreeAddr> {
881  Instruction Opcode2Addr = TwoAddr;
882  Instruction Opcode3Addr = ThreeAddr;
883  Predicate WaveSizePredicate;
884}
885
886def WMMAOpcode : GenericEnum {
887  let FilterClass = "VOP3P_Pseudo";
888}
889
890class WMMAMappingTable : GenericTable {
891  let FilterClass = "WMMAOpcodeMapping";
892  let CppTypeName = "WMMAOpcodeMappingInfo";
893  let Fields = ["Opcode2Addr", "Opcode3Addr"];
894  string TypeOf_Opcode2Addr = "WMMAOpcode";
895  string TypeOf_Opcode3Addr = "WMMAOpcode";
896}
897
898def WMMAOpcode2AddrMappingTable : WMMAMappingTable {
899  let PrimaryKey = ["Opcode2Addr"];
900  let PrimaryKeyName = "getWMMAMappingInfoFrom2AddrOpcode";
901}
902
903def WMMAOpcode3AddrMappingTable : WMMAMappingTable {
904  let PrimaryKey = ["Opcode3Addr"];
905  let PrimaryKeyName = "getWMMAMappingInfoFrom3AddrOpcode";
906}
907
// The WMMA instructions have extra constraints:
// Matrices A and B cannot overlap with D. C cannot partially overlap with D,
// but it is OK for them to be the same (which is a typical case).
//
// We implement this as follows:
// 1) Map the intrinsic to the pseudo where D is tied to C ($vdst = $src2).
// 2) The TwoAddressInstruction pass checks whether src2 is live and, if so,
//    converts the default pseudo to the pseudo where src2 is not the same as
//    vdst.
// 3) @earlyclobber on the destination satisfies the constraint during RA.
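// Illustrative example (assumed IR, not from this file): on wave32,
//   call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16(<16 x half> %a,
//                        <16 x half> %b, <8 x float> %c)
// is selected to the _twoaddr pseudo with $vdst tied to $src2; if %c is still
// live afterwards, the two-address pass rewrites it to the _threeaddr pseudo,
// where @earlyclobber keeps D from overlapping A, B, or C.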
917
918multiclass WMMAInst<string Suffix, string Instr, VOPProfile P, SDPatternOperator node = null_frag, RegisterOperand _Src01RC64 = VRegSrc_256, WMMAType Type, bit convertibleTo3Addr> {
919
920  defvar WMMAConstraints2Addr = "@earlyclobber $vdst,$vdst = $src2";
921  defvar WMMAConstraints3Addr = "@earlyclobber $vdst";
922
923  defvar WMMAProfile = VOPProfileWMMA<P, Suffix, _Src01RC64, Type.hasClamp, Type.hasOpsel>;
924  let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
925    let Constraints = WMMAConstraints2Addr, isConvertibleToThreeAddress = convertibleTo3Addr in {
926      def _twoaddr # Suffix : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
927    }
928  }
929  if convertibleTo3Addr then {
930    let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
931      let Constraints = WMMAConstraints3Addr, SchedRW = [Write32Bit, Write32Bit] in {
932        def _threeaddr # Suffix : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
933      }
934    }
935    def : WMMAOpcodeMapping<!cast<Instruction>(NAME # _twoaddr # Suffix),
936                          !cast<Instruction>(NAME # _threeaddr # Suffix)>;
937  }
938
939  let SubtargetPredicate = isGFX11Only in {
940    if !eq(Type, WMMAOpSel) then {
941      def : WMMAOpSelPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
942    } else if !eq(Type, WMMAUIClamp) then {
943      def : WMMAUIClampPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
944    } else {
945      def : WMMARegularPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
946    }
947  }
948}
949
950
951
952let WaveSizePredicate = isWave32 in {
953  defm V_WMMA_F32_16X16X16_F16   : WMMAInst<"_w32", "v_wmma_f32_16x16x16_f16",  VOP_V8F32_V16F16_V16F16_V8F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular, 1>;
954  defm V_WMMA_F32_16X16X16_BF16  : WMMAInst<"_w32", "v_wmma_f32_16x16x16_bf16", VOP_V8F32_V16I16_V16I16_V8F32, int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular, 1>;
955  defm V_WMMA_F16_16X16X16_F16   : WMMAInst<"_w32", "v_wmma_f16_16x16x16_f16",   VOP_V16F16_V16F16_V16F16_V16F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel, 1>;
956  defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w32", "v_wmma_bf16_16x16x16_bf16", VOP_V16I16_V16I16_V16I16_V16I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel, 1>;
957  defm V_WMMA_F16_16X16X16_F16_TIED   : WMMAInst<"_w32", "v_wmma_f16_16x16x16_f16",   VOP_V16F16_V16F16_V16F16_V16F16, int_amdgcn_wmma_f16_16x16x16_f16_tied, VRegSrc_256, WMMAOpSel, 0>;
958  defm V_WMMA_BF16_16X16X16_BF16_TIED : WMMAInst<"_w32", "v_wmma_bf16_16x16x16_bf16", VOP_V16I16_V16I16_V16I16_V16I16, int_amdgcn_wmma_bf16_16x16x16_bf16_tied, VRegSrc_256, WMMAOpSel, 0>;
959  defm V_WMMA_I32_16X16X16_IU8   : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu8",   VOP_V8I32_V4I32_V4I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp, 1>;
960  defm V_WMMA_I32_16X16X16_IU4   : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu4",   VOP_V8I32_V2I32_V2I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64,  WMMAUIClamp, 1>;
961}
962
963let WaveSizePredicate = isWave64 in {
964  defm V_WMMA_F32_16X16X16_F16   : WMMAInst<"_w64", "v_wmma_f32_16x16x16_f16",   VOP_V4F32_V16F16_V16F16_V4F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular, 1>;
965  defm V_WMMA_F32_16X16X16_BF16  : WMMAInst<"_w64", "v_wmma_f32_16x16x16_bf16",  VOP_V4F32_V16I16_V16I16_V4F32, int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular, 1>;
966  defm V_WMMA_F16_16X16X16_F16   : WMMAInst<"_w64", "v_wmma_f16_16x16x16_f16",   VOP_V8F16_V16F16_V16F16_V8F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel, 1>;
967  defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w64", "v_wmma_bf16_16x16x16_bf16", VOP_V8I16_V16I16_V16I16_V8I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel, 1>;
968  defm V_WMMA_F16_16X16X16_F16_TIED   : WMMAInst<"_w64", "v_wmma_f16_16x16x16_f16",   VOP_V8F16_V16F16_V16F16_V8F16, int_amdgcn_wmma_f16_16x16x16_f16_tied, VRegSrc_256, WMMAOpSel, 0>;
969  defm V_WMMA_BF16_16X16X16_BF16_TIED : WMMAInst<"_w64", "v_wmma_bf16_16x16x16_bf16", VOP_V8I16_V16I16_V16I16_V8I16, int_amdgcn_wmma_bf16_16x16x16_bf16_tied, VRegSrc_256, WMMAOpSel, 0>;
970  defm V_WMMA_I32_16X16X16_IU8   : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu8",   VOP_V4I32_V4I32_V4I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp, 1>;
971  defm V_WMMA_I32_16X16X16_IU4   : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu4",   VOP_V4I32_V2I32_V2I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64, WMMAUIClamp, 1>;
972
973}
974
975class VOP3PWMMA_Profile<list<ValueType> ArgTy, bit _IsSWMMAC, int _IndexType,
976                        bit _IsIU, bit _IsFP8BF8>
977    : VOP3P_Profile<VOPProfile<ArgTy>> {
978  bit IsIU = _IsIU;
979  bit IsFP8BF8 = _IsFP8BF8;
980  bit IsF16BF16 = !not(!or(IsIU, IsFP8BF8));
981
982  int IndexType = _IndexType;
983
984  let IsPacked = 1;
985  let IsWMMA = !not(_IsSWMMAC);
986  let IsSWMMAC = _IsSWMMAC;
987
988  bit IsAB_F16 = !and(IsF16BF16, ArgTy[1].isFP);
989  bit IsAB_BF16 = !and(IsF16BF16, isIntType<ArgTy[1]>.ret);
990  bit IsC_F32 = !or(!eq(ArgTy[3], v8f32), !eq(ArgTy[3], v4f32));
991  bit IsC_BF16 = !or(!eq(ArgTy[3], v8i16), !eq(ArgTy[3], v4i16));
992  bit IsC_F16 = !or(!eq(ArgTy[3], v8f16), !eq(ArgTy[3], v4f16));
993
994  bit NegLo01 = !or(IsF16BF16, IsIU);
995  bit NegLo2 = !and(!or(IsF16BF16, IsFP8BF8), IsWMMA);
996  bit NegHi01 = IsF16BF16;
997  bit NegHi2 = !and(!or(IsF16BF16, IsFP8BF8), IsWMMA);
998  bit NegLoAny = !or(NegLo01, NegLo2);
999  bit NegHiAny = !or(NegHi01, NegHi2);
1000
1001  let DstRC = !cond(!eq(ArgTy[0], v8f32): VDst_256,
1002                    !eq(ArgTy[0], v8i32): VDst_256,
1003                    !eq(ArgTy[0], v8f16): VDst_128,
1004                    !eq(ArgTy[0], v8i16): VDst_128,
1005                    !eq(ArgTy[0], v4f32): VDst_128,
1006                    !eq(ArgTy[0], v4i32): VDst_128,
1007                    !eq(ArgTy[0], v4f16): VDst_64,
1008                    !eq(ArgTy[0], v4i16): VDst_64);
1009  let Src0RC64 = !cond(!eq(ArgTy[1], v8f16): VRegSrc_128,
1010                       !eq(ArgTy[1], v4f16): VRegSrc_64,
1011                       !eq(ArgTy[1], v4i16): VRegSrc_64,
1012                       !eq(ArgTy[1], v8i16): VRegSrc_128,
1013                       !eq(ArgTy[1], v4i32): VRegSrc_128,
1014                       !eq(ArgTy[1], v2i32): VRegSrc_64,
1015                       !eq(ArgTy[1], i32)  : VRegSrc_32);
1016  let Src1RC64 = !cond(!eq(ArgTy[2], v16f16): VRegSrc_256,
1017                       !eq(ArgTy[2], v16i16): VRegSrc_256,
1018                       !eq(ArgTy[2], v8f16): VRegSrc_128,
1019                       !eq(ArgTy[2], v8i16): VRegSrc_128,
1020                       !eq(ArgTy[2], v4i32): VRegSrc_128,
1021                       !eq(ArgTy[2], v4i16): VRegSrc_64,
1022                       !eq(ArgTy[2], v4f16): VRegSrc_64,
1023                       !eq(ArgTy[2], v2i32): VRegSrc_64,
1024                       !eq(ArgTy[2], i32)  : VRegSrc_32);
1025  let Src2RC64 = !if(IsSWMMAC, DstRC,
1026                               !cond(!eq(ArgTy[3], v8f32): VISrc_256_f32,
1027                                     !eq(ArgTy[3], v8i32): VISrc_256_b32,
1028                                     !eq(ArgTy[3], v8f16): VISrc_128_f16,
1029                                     !eq(ArgTy[3], v8i16): VISrc_128_f32, // bf16
1030                                     !eq(ArgTy[3], v4f16): VISrc_64_f16,
1031                                     !eq(ArgTy[3], v4i16): VISrc_64_b32,
1032                                     !eq(ArgTy[3], v4i32): VISrc_128_b32,
1033                                     !eq(ArgTy[3], v4f32): VISrc_128_f32));
1034
1035  // For f16 and bf16 matrices A and B, each element can be modified by
1036  // fneg (neg_lo, neg_hi = 1). For iu4 and iu8 matrices A and B, neg_lo is
1037  // overloaded to mean unsigned/signed: neg_lo = 0 (u4 and u8) is unsigned (zext),
1038  // neg_lo = 1 (i4 and i8) is signed (sext). For f16, bf16 and f32 matrix C, each
1039  // element can be modified by fneg (neg_lo = 1) or fabs (neg_hi = 1).
1040
1041  // Opcode             | src0/src1 - matrix A/B | src2 - matrix C or Index
1042  // ---------------------------------------------------------------------------
1043  // wmma f32_f16       | both neg_lo,neg_hi = 1 | neg_lo = 1  neg C(f32)
1044  // wmma f32_bf16      | neg A/B (f16 or bf16)  | neg_hi = 1  abs C(f32)
1045  // ---------------------------------------------------------------------------
1046  // wmma f16_f16       | both neg_lo,neg_hi = 1 | neg_lo = 1 neg C(f16 or bf16)
1047  // wmma bf16_bf16     | neg A/B (f16 or bf16)  | neg_hi = 1 abs C(f16 or bf16)
1048  // ---------------------------------------------------------------------------
1049  // wmma i32_iu8/iu4   | neg_lo = 0 u4/u8(zext) | not allowed for
1050  //                    | neg_lo = 1 i4/i8(sext) | i32 matrices
1051  // ---------------------------------------------------------------------------
1052  // wmma f32_fp8/bf8   | not allowed for        | neg_lo = 1  neg C(f32)
1053  // (4 instructions)   | f8 and bf8 matrices    | neg_hi = 1  abs C(f32)
1054  // ---------------------------------------------------------------------------
1055  // swmmac f32_f16     | both neg_lo,neg_hi = 1 | not allowed for sparse matrix
1056  // swmmac f32_bf16    | neg A/B (f16 or bf16)  | A Index - matrix C is in dst
1057  // ---------------------------------------------------------------------------
1058  // swmmac f16_f16     | both neg_lo,neg_hi = 1 | not allowed for sparse matrix
1059  // swmmac bf16_bf16   | neg A/B (f16 or bf16)  | A Index - matrix C is in dst
1060  // ---------------------------------------------------------------------------
1061  // swmmac i32_iu8/iu4 | neg_lo = 0 u4/u8(zext) | not allowed for sparse matrix
1062  //                    | neg_lo = 1 i4/i8(sext) | A Index - matrix C is in dst
1063  // ---------------------------------------------------------------------------
1064  // swmmac f32_fp8/bf8 | not allowed for        | not allowed for sparse matrix
1065  // (4 instructions)   | f8 and bf8 matrices    | A Index - matrix C is in dst
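  // Illustrative sketch (not normative) of how these bits surface in asm for a
  // wave32 gfx12 f32_f16 wmma that negates A and takes the absolute value of C;
  // the register ranges are placeholders and the modifier syntax is the usual
  // VOP3P printing:
  //   v_wmma_f32_16x16x16_f16 v[0:7], v[8:11], v[12:15], v[0:7] neg_lo:[1,0,0] neg_hi:[1,0,1]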
1066
1067  // pseudo
1068
1069  // fp8/bf8 wmmas don't use src0/src1 modifiers, iu wmmas use neg_lo, and f16
1070  // and bf16 wmmas use neg_lo and neg_hi. iu wmmas (C is i32) don't use src2
1071  // modifiers; the remaining wmmas (f16, bf16 and f8bf8) use neg_lo and neg_hi
1072  // for C (C is f32, f16 or bf16). swmmac uses index_key and no src2 modifiers.
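  // For example, under these rules an iu wmma ends up with
  //   (src0_modifiers, src0, src1_modifiers, src1, src2, clamp, neg_lo)
  // while a swmmac instead takes $srcTiedDef, a 32-bit index register as $src2
  // and an index_key, with no src2 modifiers.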
1073
1074  dag Src0Mods = !if(IsFP8BF8, (ins), (ins PackedF16InputMods:$src0_modifiers));
1075  dag Src1Mods = !if(IsFP8BF8, (ins), (ins PackedF16InputMods:$src1_modifiers));
1076  dag Src2Mods = !if(IsIU, (ins), (ins PackedF16InputMods:$src2_modifiers));
1077  dag IndexKey = !cond(!eq(IndexType, 0) : (ins),
1078                       !eq(IndexType, 8) : (ins IndexKey8bit:$index_key_8bit),
1079                       !eq(IndexType, 16): (ins IndexKey16bit:$index_key_16bit));
1080  dag Clamp = !if(IsIU, (ins clampmod0:$clamp), (ins));
1081  dag Neg = !cond(!and(NegLoAny, NegHiAny)             : (ins neg_lo0:$neg_lo, neg_hi0:$neg_hi),
1082                  !and(NegLoAny, !not(NegHiAny))       : (ins neg_lo0:$neg_lo),
1083                  !and(!not(NegLoAny), !not(NegHiAny)) : (ins));
1084
1085  let InsVOP3P = !con(Src0Mods, (ins Src0RC64:$src0), Src1Mods, (ins Src1RC64:$src1),
1086                      !cond(IsWMMA   : !con(Src2Mods, (ins Src2RC64:$src2)),
1087                            IsSWMMAC : !con((ins DstRC:$srcTiedDef), (ins VRegSrc_32:$src2), IndexKey)),
1088                      Clamp, Neg);
1089
1090  // asm
1091
1092  string IndexKeyAsm = !cond(!eq(IndexType, 0)  : "",
1093                             !eq(IndexType, 8)  : "$index_key_8bit",
1094                             !eq(IndexType, 16) : "$index_key_16bit");
1095  string ClampAsm = !if(IsIU, "$clamp", "");
1096  string NegAsm = !cond(!and(NegLoAny, NegHiAny)             : "$neg_lo$neg_hi",
1097                        !and(NegLoAny, !not(NegHiAny))       : "$neg_lo",
1098                        !and(!not(NegLoAny), !not(NegHiAny)) : "");
1099
1100  let AsmVOP3P = "$vdst, $src0, $src1, $src2"#IndexKeyAsm#NegAsm#ClampAsm;
1101
1102  // isel patterns
1103
1104  dag Src0InPat  = !cond(IsAB_F16  : (ins (Src0VT (WMMAModsF16Neg Src0VT:$src0, i32:$src0_modifiers))),
1105                         IsAB_BF16 : (ins Src0VT:$src0),
1106                         IsIU      : (ins (VOP3PModsNeg i32:$src0_modifiers), Src0VT:$src0),
1107                         IsFP8BF8  : (ins Src0VT:$src0));
1108  dag Src0OutPat = !cond(IsAB_F16  : (ins i32:$src0_modifiers, Src0VT:$src0),
1109                         IsAB_BF16 : (ins (i32 8), Src0VT:$src0),
1110                         IsIU      : (ins i32:$src0_modifiers, Src0VT:$src0),
1111                         IsFP8BF8  : (ins Src0VT:$src0));
1112  dag Src1InPat  = !cond(IsAB_F16  : (ins (Src1VT (WMMAModsF16Neg Src1VT:$src1, i32:$src1_modifiers))),
1113                         IsAB_BF16 : (ins Src1VT:$src1),
1114                         IsIU      : (ins (VOP3PModsNeg i32:$src1_modifiers), Src1VT:$src1),
1115                         IsFP8BF8  : (ins Src1VT:$src1));
1116  dag Src1OutPat = !cond(IsAB_F16  : (ins i32:$src1_modifiers, Src1VT:$src1),
1117                         IsAB_BF16 : (ins (i32 8), Src1VT:$src1),
1118                         IsIU      : (ins i32:$src1_modifiers, Src1VT:$src1),
1119                         IsFP8BF8  : (ins Src1VT:$src1));
1120  dag Src2InPatWmma  = !cond(IsC_F32  : (ins (Src2VT (WMMAModsF32NegAbs Src2VT:$src2, i32:$src2_modifiers))),
1121                             IsC_F16  : (ins (Src2VT (WMMAModsF16NegAbs Src2VT:$src2, i32:$src2_modifiers))),
1122                             IsC_BF16 : (ins Src2VT:$src2),
1123                             IsIU     : (ins Src2VT:$src2),
1124                             IsSWMMAC : (ins));
1125  dag Src2OutPatWmma = !cond(IsC_F32  : (ins i32:$src2_modifiers, Src2VT:$src2),
1126                             IsC_F16  : (ins i32:$src2_modifiers, Src2VT:$src2),
1127                             IsC_BF16 : (ins (i32 8), Src2VT:$src2),
1128                             IsIU     : (ins Src2VT:$src2),
1129                             IsSWMMAC : (ins));
1130  dag ClampPat = !if(IsIU, (ins i1:$clamp), (ins));
1131  dag IndexInPat = !cond(!eq(IndexType, 0) : (ins i32:$src2),
1132                         !eq(IndexType, 8) : (ins (i32 (SWMMACIndex8 i32:$src2, i32:$index_key_8bit))),
1133                         !eq(IndexType, 16): (ins (i32 (SWMMACIndex16 i32:$src2, i32:$index_key_16bit))));
1134  dag IndexOutPat = !cond(!eq(IndexType, 0) : (ins i32:$src2),
1135                          !eq(IndexType, 8) : (ins i32:$src2, i32:$index_key_8bit),
1136                          !eq(IndexType, 16): (ins i32:$src2, i32:$index_key_16bit));
1137  dag Src2InlineInPat = (ins (Src2VT (WMMAVISrc Src2VT:$src2)));
1138  dag Src2InlineOutPat = !con(!if(IsIU, (ins), (ins (i32 8))), (ins Src2VT:$src2));
1139
1140
1141  dag WmmaInPat  = !con(Src0InPat, Src1InPat, Src2InPatWmma, ClampPat);
1142  dag WmmaOutPat = !con(Src0OutPat, Src1OutPat, Src2OutPatWmma, ClampPat);
1143
1144  dag SwmmacInPat  = !con(Src0InPat, Src1InPat, (ins Src2VT:$srcTiedDef), IndexInPat, ClampPat);
1145  dag SwmmacOutPat = !con(Src0OutPat, Src1OutPat, (ins Src2VT:$srcTiedDef), IndexOutPat, ClampPat);
1146
1147  // A wmma pattern where src2 is an inline imm uses the _threeaddr pseudo;
1148  // it can't use _twoaddr since that would violate the src2-tied-to-vdst constraint.
1149  dag WmmaInlineInPat  = !con(Src0InPat, Src1InPat, Src2InlineInPat,  ClampPat);
1150  dag WmmaInlineOutPat = !con(Src0OutPat, Src1OutPat, Src2InlineOutPat, ClampPat);
1151}
1152
1153multiclass WMMAInstGFX12<string Instr, VOP3PWMMA_Profile WMMAProfile, string PseudoInstrSuffix> {
1154  let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
1155    let Constraints = "@earlyclobber $vdst,$vdst = $src2", isConvertibleToThreeAddress = 1 in
1156      def _twoaddr : VOP3P_Pseudo<Instr, WMMAProfile>{
1157        let PseudoInstr = Instr#PseudoInstrSuffix;
1158      }
1159
1160    let Constraints = "@earlyclobber $vdst", SchedRW = [Write32Bit, Write32Bit] in
1161      def _threeaddr : VOP3P_Pseudo<Instr, WMMAProfile>{
1162        let PseudoInstr = Instr#PseudoInstrSuffix;
1163      }
1164
1165  }
1166  def : WMMAOpcodeMapping<!cast<Instruction>(NAME # _twoaddr),
1167                          !cast<Instruction>(NAME # _threeaddr)>;
1168}
1169
1170multiclass SWMMACInstGFX12<string Instr, VOP3PWMMA_Profile WMMAProfile, string PseudoInstrSuffix> {
1171  def _twoaddr : VOP3P_Pseudo<Instr, WMMAProfile>{
1172    let Mnemonic = Instr;
1173    let PseudoInstr = Instr#PseudoInstrSuffix;
1174    let mayRaiseFPException = 0;
1175    let ReadsModeReg = 0;
1176    let AsmMatchConverter = "cvtSWMMAC";
1177
1178    let Constraints = "@earlyclobber $vdst,$vdst = $srcTiedDef";
1179  }
1180}
1181
1182// The first argument in the Profile lists the types for matrices D, A, B and C
1183// (D = A * B + C) as used by llvm ir; the types are vectors of matrix elements.
1184// wave32:
1185// For 16x16 matrices, lanes 0 to 31 will each have 8 matrix elts,
1186// for 16x32 matrices 16 elts, and for 16x64 matrices 32 elts.
1187// wave64:
1188// Each lane holds half as many elements as in wave32, with the
1189// exception of 16x16_iu4: lanes 0-31 will have 8xi4, remaining lanes are ignored.
1190
1191// general idea on element distribution differences:
1192// wave32: lane n has 8 matrix elements
1193// wave64: lane n has first 4, lane n+32 has other 4 elements
1194
1195// index size: for each 2 elements in a lane you need 4 bits in the index
1196
1197// Non-standard types (iu8, iu4, fp8, bf8) are packed in vectors of i32s.
1198// Their original type is given in the comment on the right and refers to A and B.
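// Worked example (for orientation only): a 16x16 f16 A operand has 256 elements,
// so in wave32 each of the 32 lanes holds 256/32 = 8 elements (v8f16, 4 VGPRs)
// and in wave64 each lane holds 4 (v4f16, 2 VGPRs). For swmmac, 8 sparse A
// elements per lane need 8/2 * 4 = 16 index bits, which is why the f16/bf16 w32
// swmmac profiles below use a 16-bit index_key while their w64 counterparts use
// an 8-bit one.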
1199
1200def F32_F16_WMMA_w32    : VOP3PWMMA_Profile<[v8f32, v8f16, v8f16, v8f32], 0, 0, 0, 0>;
1201def F32_BF16_WMMA_w32   : VOP3PWMMA_Profile<[v8f32, v8i16, v8i16, v8f32], 0, 0, 0, 0>;
1202def F16_F16_WMMA_w32    : VOP3PWMMA_Profile<[v8f16, v8f16, v8f16, v8f16], 0, 0, 0, 0>;
1203def BF16_BF16_WMMA_w32  : VOP3PWMMA_Profile<[v8i16, v8i16, v8i16, v8i16], 0, 0, 0, 0>;
1204def I32_IU8_WMMA_w32    : VOP3PWMMA_Profile<[v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 8xi8
1205def I32_IU4X16_WMMA_w32 : VOP3PWMMA_Profile<[v8i32,   i32,   i32, v8i32], 0, 0, 1, 0>; // 8xi4
1206def F32_FP8BF8_WMMA_w32 : VOP3PWMMA_Profile<[v8f32, v2i32, v2i32, v8f32], 0, 0, 0, 1>; // 8xf8
1207def I32_IU4X32_WMMA_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 16xi4
1208
1209def F32_F16_WMMA_w64    : VOP3PWMMA_Profile<[v4f32, v4f16, v4f16, v4f32], 0, 0, 0, 0>;
1210def F32_BF16_WMMA_w64   : VOP3PWMMA_Profile<[v4f32, v4i16, v4i16, v4f32], 0, 0, 0, 0>;
1211def F16_F16_WMMA_w64    : VOP3PWMMA_Profile<[v4f16, v4f16, v4f16, v4f16], 0, 0, 0, 0>;
1212def BF16_BF16_WMMA_w64  : VOP3PWMMA_Profile<[v4i16, v4i16, v4i16, v4i16], 0, 0, 0, 0>;
1213def I32_IU8_WMMA_w64    : VOP3PWMMA_Profile<[v4i32,   i32,   i32, v4i32], 0, 0, 1, 0>; // 4xi8
1214def I32_IU4X16_WMMA_w64 : VOP3PWMMA_Profile<[v4i32,   i32,   i32, v4i32], 0, 0, 1, 0>; // 8xi4 *
1215def F32_FP8BF8_WMMA_w64 : VOP3PWMMA_Profile<[v4f32,   i32,   i32, v4f32], 0, 0, 0, 1>; // 4xf8
1216def I32_IU4X32_WMMA_w64 : VOP3PWMMA_Profile<[v4i32,   i32,   i32, v4i32], 0, 0, 1, 0>; // 8xi4
1217
1218def F32_F16_SWMMAC_w32    : VOP3PWMMA_Profile<[v8f32, v8f16, v16f16, v8f32], 1, 16, 0, 0>;
1219def F32_BF16_SWMMAC_w32   : VOP3PWMMA_Profile<[v8f32, v8i16, v16i16, v8f32], 1, 16, 0, 0>;
1220def F16_F16_SWMMAC_w32    : VOP3PWMMA_Profile<[v8f16, v8f16, v16f16, v8f16], 1, 16, 0, 0>;
1221def BF16_BF16_SWMMAC_w32  : VOP3PWMMA_Profile<[v8i16, v8i16, v16i16, v8i16], 1, 16, 0, 0>;
1222def I32_IU8_SWMMAC_w32    : VOP3PWMMA_Profile<[v8i32, v2i32,  v4i32, v8i32], 1, 16, 1, 0>; // 8xi8, 16xi8
1223def I32_IU4X32_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32,   i32,  v2i32, v8i32], 1, 16, 1, 0>; // 8xi4, 16xi4
1224def I32_IU4X64_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, v2i32,  v4i32, v8i32], 1,  0, 1, 0>; // 16xi4, 32xi4 **
1225def F32_FP8BF8_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v2i32,  v4i32, v8f32], 1, 16, 0, 1>; // 8xf8, 16xf8
1226
1227def F32_F16_SWMMAC_w64    : VOP3PWMMA_Profile<[v4f32, v4f16, v8f16, v4f32], 1,  8, 0, 0>;
1228def F32_BF16_SWMMAC_w64   : VOP3PWMMA_Profile<[v4f32, v4i16, v8i16, v4f32], 1,  8, 0, 0>;
1229def F16_F16_SWMMAC_w64    : VOP3PWMMA_Profile<[v4f16, v4f16, v8f16, v4f16], 1,  8, 0, 0>;
1230def BF16_BF16_SWMMAC_w64  : VOP3PWMMA_Profile<[v4i16, v4i16, v8i16, v4i16], 1,  8, 0, 0>;
1231def I32_IU8_SWMMAC_w64    : VOP3PWMMA_Profile<[v4i32,   i32, v2i32, v4i32], 1,  8, 1, 0>; // 4xi8, 8xi8
1232def I32_IU4X32_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32,   i32,   i32, v4i32], 1, 16, 1, 0>; // 8xi4, 8xi4 ***
1233def I32_IU4X64_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32,   i32, v2i32, v4i32], 1, 16, 1, 0>; // 8xi4, 16xi4
1234def F32_FP8BF8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32,   i32, v2i32, v4f32], 1,  8, 0, 1>; // 4xf8, 8xf8
1235
1236// *   IU4X16_WMMA_w64 lanes 0-31 will have 8xi4, remaining lanes are ignored
1237// **  IU4X64_SWMMAC_w32 index is i32, index_key is not used
1238// *** IU4X32_SWMMAC_w64 lanes 0-31 will have 8xi4, remaining lanes are ignored
1239//                       for matrix A; index is i16 and matrix B uses all lanes
1240
1241let WaveSizePredicate = isWave32 in {
1242defm V_WMMA_F32_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w32, "_w32">;
1243defm V_WMMA_F32_16X16X16_BF16_w32    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w32, "_w32">;
1244defm V_WMMA_F16_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w32, "_w32">;
1245defm V_WMMA_BF16_16X16X16_BF16_w32   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w32, "_w32">;
1246defm V_WMMA_I32_16X16X16_IU8_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w32, "_w32">;
1247defm V_WMMA_I32_16X16X16_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w32, "_w32">;
1248defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
1249defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
1250defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
1251defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
1252defm V_WMMA_I32_16X16X32_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w32, "_w32">;
1253
1254defm V_SWMMAC_F32_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w32, "_w32">;
1255defm V_SWMMAC_F32_16X16X32_BF16_w32    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w32, "_w32">;
1256defm V_SWMMAC_F16_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w32, "_w32">;
1257defm V_SWMMAC_BF16_16X16X32_BF16_w32   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w32, "_w32">;
1258defm V_SWMMAC_I32_16X16X32_IU8_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w32, "_w32">;
1259defm V_SWMMAC_I32_16X16X32_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w32, "_w32">;
1260defm V_SWMMAC_I32_16X16X64_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w32, "_w32">;
1261defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
1262defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
1263defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
1264defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
1265}
1266
1267let WaveSizePredicate = isWave64 in {
1268defm V_WMMA_F32_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w64, "_w64">;
1269defm V_WMMA_F32_16X16X16_BF16_w64    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w64, "_w64">;
1270defm V_WMMA_F16_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w64, "_w64">;
1271defm V_WMMA_BF16_16X16X16_BF16_w64   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w64, "_w64">;
1272defm V_WMMA_I32_16X16X16_IU8_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w64, "_w64">;
1273defm V_WMMA_I32_16X16X16_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w64, "_w64">;
1274defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
1275defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
1276defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
1277defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
1278defm V_WMMA_I32_16X16X32_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w64, "_w64">;
1279
1280defm V_SWMMAC_F32_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w64, "_w64">;
1281defm V_SWMMAC_F32_16X16X32_BF16_w64    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w64, "_w64">;
1282defm V_SWMMAC_F16_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w64, "_w64">;
1283defm V_SWMMAC_BF16_16X16X32_BF16_w64   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w64, "_w64">;
1284defm V_SWMMAC_I32_16X16X32_IU8_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w64, "_w64">;
1285defm V_SWMMAC_I32_16X16X32_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w64, "_w64">;
1286defm V_SWMMAC_I32_16X16X64_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w64, "_w64">;
1287defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
1288defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
1289defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
1290defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
1291}
1292
1293// IsGFX11OpselIntrinsic: the f16_f16 and bf16_bf16 intrinsics have an imm operand
1294// that controls opsel. It is used by gfx11 and was removed in gfx12 (operand must be 0).
1295multiclass WMMAPat<string Inst, SDPatternOperator node, VOP3PWMMA_Profile P, bit IsGFX11OpselIntrinsic = 0> {
1296  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
1297                (P.DstVT !setdagop(P.WmmaOutPat, !cast<Instruction>(Inst#"_twoaddr")))>;
1298  let AddedComplexity = 4 in
1299  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInlineInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
1300                (P.DstVT !setdagop(P.WmmaInlineOutPat, !cast<Instruction>(Inst#"_threeaddr")))>;
1301}
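// Rough sketch (overloaded intrinsic name suffixes omitted) of the IR these
// patterns match for a wave32 gfx12 f32_f16 wmma:
//   %d = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16(<8 x half> %a,
//                                                            <8 x half> %b,
//                                                            <8 x float> %c)
// This selects to V_WMMA_F32_16X16X16_F16_w32_twoaddr, or to _threeaddr when C
// is an inline immediate.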
1302
1303class SWMMACPat<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
1304  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
1305          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))>;
1306
1307class SWMMACPat_w64<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
1308  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
1309          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))>{
1310            let WaveSizePredicate = isWave64;
1311          }
1312
1313let WaveSizePredicate = isWave32, SubtargetPredicate = isGFX12Plus in {
1314  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w32",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w32>;
1315  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w32",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w32>;
1316  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w32",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w32,1>;
1317  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w32",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w32,1>;
1318  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w32",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w32>;
1319  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w32",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w32>;
1320  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w32>;
1321  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w32>;
1322  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w32>;
1323  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w32>;
1324  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w32",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w32>;
1325
1326  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w32_twoaddr,     int_amdgcn_swmmac_f32_16x16x32_f16,     F32_F16_SWMMAC_w32>;
1327  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w32_twoaddr,    int_amdgcn_swmmac_f32_16x16x32_bf16,    F32_BF16_SWMMAC_w32>;
1328  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w32_twoaddr,     int_amdgcn_swmmac_f16_16x16x32_f16,     F16_F16_SWMMAC_w32>;
1329  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w32_twoaddr,   int_amdgcn_swmmac_bf16_16x16x32_bf16,   BF16_BF16_SWMMAC_w32>;
1330  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu8,     I32_IU8_SWMMAC_w32>;
1331  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w32_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu4,     I32_IU4X32_SWMMAC_w32>;
1332  def : GCNPat <(I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacInPat,  int_amdgcn_swmmac_i32_16x16x64_iu4)),
1333                (I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacOutPat, V_SWMMAC_I32_16X16X64_IU4_w32_twoaddr))>;
1334  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w32>;
1335  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w32>;
1336  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w32>;
1337  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w32>;
1338}
1339
1340let WaveSizePredicate = isWave64, SubtargetPredicate = isGFX12Plus in {
1341  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w64",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w64>;
1342  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w64",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w64>;
1343  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w64",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w64,1>;
1344  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w64",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w64,1>;
1345  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w64",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w64>;
1346  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w64",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w64>;
1347  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w64>;
1348  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w64>;
1349  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w64>;
1350  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w64>;
1351  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w64",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w64>;
1352
1353  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w64_twoaddr,     int_amdgcn_swmmac_f32_16x16x32_f16,     F32_F16_SWMMAC_w64>;
1354  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w64_twoaddr,    int_amdgcn_swmmac_f32_16x16x32_bf16,    F32_BF16_SWMMAC_w64>;
1355  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w64_twoaddr,     int_amdgcn_swmmac_f16_16x16x32_f16,     F16_F16_SWMMAC_w64>;
1356  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w64_twoaddr,   int_amdgcn_swmmac_bf16_16x16x32_bf16,   BF16_BF16_SWMMAC_w64>;
1357  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu8,     I32_IU8_SWMMAC_w64>;
1358  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x32_iu4,     I32_IU4X32_SWMMAC_w64>;
1359  def : SWMMACPat<V_SWMMAC_I32_16X16X64_IU4_w64_twoaddr,     int_amdgcn_swmmac_i32_16x16x64_iu4,     I32_IU4X64_SWMMAC_w64>;
1360  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w64>;
1361  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w64>;
1362  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w64>;
1363  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w64>;
1364}
1365
1366
1367//===----------------------------------------------------------------------===//
1368// Begin Real Encodings
1369//===----------------------------------------------------------------------===//
1370
1371class VOP3P_DPP16<bits<7> op, VOP_DPP_Pseudo ps, int subtarget,
1372                  string opName = ps.OpName>
1373    : VOP3P_DPP<op, opName, ps.Pfl, 1>, SIMCInstr<ps.PseudoInstr, subtarget> {
1374  let hasSideEffects = ps.hasSideEffects;
1375  let Defs = ps.Defs;
1376  let SchedRW = ps.SchedRW;
1377  let Uses = ps.Uses;
1378  let AssemblerPredicate = HasDPP16;
1379  let SubtargetPredicate = HasDPP16;
1380  let OtherPredicates = ps.OtherPredicates;
1381}
1382
1383class VOP3P_DPP8_Base<bits<7> op, VOP_Pseudo ps, string opName = ps.OpName>
1384    : VOP3P_DPP8<op, opName, ps.Pfl> {
1385  let hasSideEffects = ps.hasSideEffects;
1386  let Defs = ps.Defs;
1387  let SchedRW = ps.SchedRW;
1388  let Uses = ps.Uses;
1389  let OtherPredicates = ps.OtherPredicates;
1390}
1391
1392//===----------------------------------------------------------------------===//
1393// GFX11, GFX12
1394//===----------------------------------------------------------------------===//
1395
1396multiclass VOP3P_Real_Base<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
1397                      string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
1398  def Gen.Suffix :
1399    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
1400    VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;
1401}
1402
1403class VOP3PeWmma<bits<7> op, VOPProfile P, VOP3PWMMA_Profile WMMAP>
1404    : VOP3Pe_gfx11_gfx12<op, P>{
1405  // opsel
1406  let Inst{11} = !cond(!eq(WMMAP.IndexType, 0)  : 0,
1407                       !eq(WMMAP.IndexType, 8)  : index_key_8bit{0},
1408                       !eq(WMMAP.IndexType, 16) : index_key_16bit{0});
1409  let Inst{12} = !if(!eq(WMMAP.IndexType, 8), index_key_8bit{1}, 0);
1410  let Inst{13} = 0;
1411  // opsel_hi
1412  let Inst{59} = 1;
1413  let Inst{60} = 1;
1414  let Inst{14} = 1;
1415  // neg_lo
1416  let Inst{61} = !if(WMMAP.NegLo01, src0_modifiers{0}, 0);
1417  let Inst{62} = !if(WMMAP.NegLo01, src1_modifiers{0}, 0);
1418  let Inst{63} = !if(WMMAP.NegLo2, src2_modifiers{0}, 0);
1419  // neg_hi
1420  let Inst{8}  = !if(WMMAP.NegHi01, src0_modifiers{1}, 0);
1421  let Inst{9}  = !if(WMMAP.NegHi01, src1_modifiers{1}, 0);
1422  let Inst{10} = !if(WMMAP.NegHi2, src2_modifiers{1}, 0);
1423  // clamp
1424  let Inst{15} = !if(WMMAP.IsIU, clamp{0}, 0);
1425}
1426
1427multiclass VOP3P_WMMA_Real_Base<GFXGen Gen, bits<7> op, VOP3PWMMA_Profile WMMAP,
1428                                string backing_ps_name = NAME,
1429                                string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
1430  def Gen.Suffix :
1431    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
1432    VOP3PeWmma<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl, WMMAP>;
1433}
1434
1435multiclass VOP3P_Real_WMMA_gfx12 <bits<7> op, VOP3PWMMA_Profile WMMAP> {
1436  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX12" in {
1437    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
1438  }
1439}
1440
1441multiclass VOP3P_Real_WMMA_gfx12w64 <bits<7> op, VOP3PWMMA_Profile WMMAP> {
1442  let WaveSizePredicate = isWave64, DecoderNamespace = "WMMAGFX12" in {
1443    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
1444  }
1445}
1446
1447defm V_WMMA_F32_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x040, F32_F16_WMMA_w32>;
1448defm V_WMMA_F32_16X16X16_BF16_w32    : VOP3P_Real_WMMA_gfx12 <0x041, F32_BF16_WMMA_w32>;
1449defm V_WMMA_F16_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x042, F16_F16_WMMA_w32>;
1450defm V_WMMA_BF16_16X16X16_BF16_w32   : VOP3P_Real_WMMA_gfx12 <0x043, BF16_BF16_WMMA_w32>;
1451defm V_WMMA_I32_16X16X16_IU8_w32     : VOP3P_Real_WMMA_gfx12 <0x044, I32_IU8_WMMA_w32>;
1452defm V_WMMA_I32_16X16X16_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x045, I32_IU4X16_WMMA_w32>;
1453defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x046, F32_FP8BF8_WMMA_w32>;
1454defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x047, F32_FP8BF8_WMMA_w32>;
1455defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x048, F32_FP8BF8_WMMA_w32>;
1456defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x049, F32_FP8BF8_WMMA_w32>;
1457defm V_WMMA_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x04a, I32_IU4X32_WMMA_w32>;
1458
1459defm V_WMMA_F32_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x040, F32_F16_WMMA_w64>;
1460defm V_WMMA_F32_16X16X16_BF16_w64    : VOP3P_Real_WMMA_gfx12w64 <0x041, F32_BF16_WMMA_w64>;
1461defm V_WMMA_F16_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x042, F16_F16_WMMA_w64>;
1462defm V_WMMA_BF16_16X16X16_BF16_w64   : VOP3P_Real_WMMA_gfx12w64 <0x043, BF16_BF16_WMMA_w64>;
1463defm V_WMMA_I32_16X16X16_IU8_w64     : VOP3P_Real_WMMA_gfx12w64 <0x044, I32_IU8_WMMA_w64>;
1464defm V_WMMA_I32_16X16X16_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x045, I32_IU4X16_WMMA_w64>;
1465defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x046, F32_FP8BF8_WMMA_w64>;
1466defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x047, F32_FP8BF8_WMMA_w64>;
1467defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x048, F32_FP8BF8_WMMA_w64>;
1468defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x049, F32_FP8BF8_WMMA_w64>;
1469defm V_WMMA_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x04a, I32_IU4X32_WMMA_w64>;
1470
1471
1472defm V_SWMMAC_F32_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x050, F32_F16_SWMMAC_w32>;
1473defm V_SWMMAC_F32_16X16X32_BF16_w32    : VOP3P_Real_WMMA_gfx12 <0x051, F32_BF16_SWMMAC_w32>;
1474defm V_SWMMAC_F16_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x052, F16_F16_SWMMAC_w32>;
1475defm V_SWMMAC_BF16_16X16X32_BF16_w32   : VOP3P_Real_WMMA_gfx12 <0x053, BF16_BF16_SWMMAC_w32>;
1476defm V_SWMMAC_I32_16X16X32_IU8_w32     : VOP3P_Real_WMMA_gfx12 <0x054, I32_IU8_SWMMAC_w32>;
1477defm V_SWMMAC_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x055, I32_IU4X32_SWMMAC_w32>;
1478defm V_SWMMAC_I32_16X16X64_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x056, I32_IU4X64_SWMMAC_w32>;
1479defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x057, F32_FP8BF8_SWMMAC_w32>;
1480defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x058, F32_FP8BF8_SWMMAC_w32>;
1481defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x059, F32_FP8BF8_SWMMAC_w32>;
1482defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x05a, F32_FP8BF8_SWMMAC_w32>;
1483
1484defm V_SWMMAC_F32_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x050, F32_F16_SWMMAC_w64>;
1485defm V_SWMMAC_F32_16X16X32_BF16_w64    : VOP3P_Real_WMMA_gfx12w64 <0x051, F32_BF16_SWMMAC_w64>;
1486defm V_SWMMAC_F16_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x052, F16_F16_SWMMAC_w64>;
1487defm V_SWMMAC_BF16_16X16X32_BF16_w64   : VOP3P_Real_WMMA_gfx12w64 <0x053, BF16_BF16_SWMMAC_w64>;
1488defm V_SWMMAC_I32_16X16X32_IU8_w64     : VOP3P_Real_WMMA_gfx12w64 <0x054, I32_IU8_SWMMAC_w64>;
1489defm V_SWMMAC_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x055, I32_IU4X32_SWMMAC_w64>;
1490defm V_SWMMAC_I32_16X16X64_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x056, I32_IU4X64_SWMMAC_w64>;
1491defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x057, F32_FP8BF8_SWMMAC_w64>;
1492defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x058, F32_FP8BF8_SWMMAC_w64>;
1493defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x059, F32_FP8BF8_SWMMAC_w64>;
1494defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x05a, F32_FP8BF8_SWMMAC_w64>;
1495
1496multiclass VOP3P_Real_with_name<GFXGen Gen, bits<7> op,
1497                          string backing_ps_name = NAME,
1498                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
1499  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
1500  let AsmString = asmName # ps.AsmOperands in
1501    def Gen.Suffix :
1502      VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
1503      VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>,
1504      MnemonicAlias<ps.Mnemonic, asmName>, Requires<[Gen.AssemblerPredicate]>;
1505}
1506
1507multiclass VOP3P_Real_dpp<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
1508                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
1509  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
1510  def _dpp#Gen.Suffix
1511      : VOP3P_DPP16<op, !cast<VOP_DPP_Pseudo>(backing_ps_name #"_dpp"),
1512                    Gen.Subtarget> {
1513    let AsmString = asmName #ps.Pfl.AsmVOP3DPP16;
1514    let DecoderNamespace = "DPP"#Gen.DecoderNamespace;
1515    let AssemblerPredicate = Gen.AssemblerPredicate;
1516  }
1517}
1518
1519multiclass VOP3P_Real_dpp8<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
1520                           string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
1521  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
1522  def _dpp8#Gen.Suffix : VOP3P_DPP8_Base<op, ps> {
1523    let AsmString = asmName #ps.Pfl.AsmVOP3DPP8;
1524    let DecoderNamespace = "DPP8"#Gen.DecoderNamespace;
1525    let AssemblerPredicate = Gen.AssemblerPredicate;
1526  }
1527}
1528
1529multiclass VOP3P_Realtriple<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
1530                            string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic>
1531    : VOP3P_Real_Base<Gen, op, backing_ps_name, asmName>,
1532      VOP3P_Real_dpp<Gen, op, backing_ps_name, asmName>,
1533      VOP3P_Real_dpp8<Gen, op, backing_ps_name, asmName>;
1534
1535//===----------------------------------------------------------------------===//
1536// GFX12
1537//===----------------------------------------------------------------------===//
1538
1539multiclass VOP3P_Real_gfx12<bits<7> op> : VOP3P_Real_Base<GFX12Gen, op>;
1540
1541multiclass VOP3P_Real_with_name_gfx12<bits<7> op,
1542                          string backing_ps_name = NAME,
1543                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> :
1544  VOP3P_Real_with_name<GFX12Gen, op, backing_ps_name, asmName>;
1545
1546defm V_PK_MIN_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1b, "V_PK_MIN_F16", "v_pk_min_num_f16">;
1547defm V_PK_MAX_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1c, "V_PK_MAX_F16", "v_pk_max_num_f16">;
1548
1549defm V_PK_MINIMUM_F16 : VOP3P_Real_gfx12<0x1d>;
1550defm V_PK_MAXIMUM_F16 : VOP3P_Real_gfx12<0x1e>;
1551
1552defm V_DOT4_F32_FP8_BF8 : VOP3P_Realtriple<GFX12Gen, 0x24>;
1553defm V_DOT4_F32_BF8_FP8 : VOP3P_Realtriple<GFX12Gen, 0x25>;
1554defm V_DOT4_F32_FP8_FP8 : VOP3P_Realtriple<GFX12Gen, 0x26>;
1555defm V_DOT4_F32_BF8_BF8 : VOP3P_Realtriple<GFX12Gen, 0x27>;
1556
1557//===----------------------------------------------------------------------===//
1558// GFX11
1559//===----------------------------------------------------------------------===//
1560
1561multiclass VOP3P_Real_gfx11_gfx12<bits<7> op> :
1562   VOP3P_Real_Base<GFX11Gen, op>, VOP3P_Real_Base<GFX12Gen, op>;
1563
1564defm V_DOT4_I32_IU8  : VOP3P_Real_gfx11_gfx12<0x16>;
1565defm V_DOT8_I32_IU4  : VOP3P_Real_gfx11_gfx12<0x18>;
1566defm V_DOT2_F32_BF16 : VOP3P_Real_gfx11_gfx12<0x1a>;
1567
1568multiclass VOP3P_Real_WMMA <bits<7> op> {
1569  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX11" in {
1570    defm _twoaddr_w32 : VOP3P_Real_Base <GFX11Gen, op>;
1571  }
1572  let WaveSizePredicate = isWave64, DecoderNamespace = "WMMAGFX11" in {
1573    defm _twoaddr_w64 : VOP3P_Real_Base <GFX11Gen, op>;
1574  }
1575}
1576
1577defm V_WMMA_F32_16X16X16_F16   : VOP3P_Real_WMMA <0x040>;
1578defm V_WMMA_F32_16X16X16_BF16  : VOP3P_Real_WMMA <0x041>;
1579defm V_WMMA_F16_16X16X16_F16   : VOP3P_Real_WMMA <0x042>;
1580defm V_WMMA_BF16_16X16X16_BF16 : VOP3P_Real_WMMA <0x043>;
1581defm V_WMMA_I32_16X16X16_IU8   : VOP3P_Real_WMMA <0x044>;
1582defm V_WMMA_I32_16X16X16_IU4   : VOP3P_Real_WMMA <0x045>;
1583
1584//===----------------------------------------------------------------------===//
1585// GFX8 (VI)
1586//===----------------------------------------------------------------------===//
1587
1588multiclass VOP3P_Real_vi<bits<7> op> {
1589  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
1590            VOP3Pe <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
1591    let AssemblerPredicate = HasVOP3PInsts;
1592    let DecoderNamespace = "GFX8";
1593    let VOP3P = 1;
1594  }
1595}
1596
1597multiclass VOP3P_Real_MAI<bits<7> op> {
1598  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
1599            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
1600    let AssemblerPredicate = HasMAIInsts;
1601    let DecoderNamespace = "GFX8";
1602    let Inst{14} = ?; // op_sel_hi(2)
1603    let Inst{59} = ?; // op_sel_hi(0)
1604    let Inst{60} = ?; // op_sel_hi(1)
1605  }
1606}
1607
1608let Constraints = "" in {
1609multiclass VOP3P_Real_MFMA_gfx90a<bits<7> op> {
1610  let SubtargetPredicate = isGFX90AOnly,
1611      AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A" in {
1612  def _gfx90a_acd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX90A>,
1613             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, 1>;
1614
1615  def _gfx90a_vcd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64"), SIEncodingFamily.GFX90A>,
1616             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64").Pfl, 0>;
1617  } // End SubtargetPredicate = isGFX90AOnly, AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A"
1618}
1619}
1620
1621multiclass VOP3P_Real_MFMA_gfx940_aliases<string NameFrom, string NameTo, string Op,
1622                                          VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(Op # "_e64"),
1623                                          VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(Op # "_vgprcd" # "_e64"),
1624                                          VOPProfile Pfl_ACD = PS_ACD.Pfl,
1625                                          VOPProfile Pfl_VCD = PS_VCD.Pfl> {
1626  if !ne(NameFrom, NameTo) then {
1627    def : InstAlias <NameTo # " " # PS_ACD.AsmOperands,
1628                     (!cast<VOP3P_Real>(Op # "_gfx940_acd") Pfl_ACD.DstRC:$vdst,
1629                         Pfl_ACD.Src0RC64:$src0, Pfl_ACD.Src1RC64:$src1, Pfl_ACD.Src2RC64:$src2,
1630                         cbsz:$cbsz, abid:$abid, blgp:$blgp)>, PredicateControl;
1631    def : InstAlias <NameTo # " " # PS_VCD.AsmOperands,
1632                     (!cast<VOP3P_Real>(Op # "_gfx940_vcd") Pfl_VCD.DstRC:$vdst,
1633                         Pfl_VCD.Src0RC64:$src0, Pfl_VCD.Src1RC64:$src1, Pfl_VCD.Src2RC64:$src2,
1634                         cbsz:$cbsz, abid:$abid, blgp:$blgp)>, PredicateControl;
1635  }
1636}
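// For example, when NameTo is the pre-gfx940 mnemonic (as in the instantiations
// below), the old spelling is still accepted by the assembler as an alias for
// the gfx940 encoding.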
1637
1638multiclass VOP3P_Real_MFMA_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
1639                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
1640                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
1641  let SubtargetPredicate = isGFX940Plus,
1642      DecoderNamespace = "GFX940",
1643      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
1644  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
1645                    VOP3Pe_MAI <op, PS_ACD.Pfl, 1>;
1646
1647  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
1648                    VOP3Pe_MAI <op, PS_VCD.Pfl, 0>;
1649  } // End SubtargetPredicate = isGFX940Plus, DecoderNamespace = "GFX940"
1650
1651  let SubtargetPredicate = isGFX940Plus in {
1652    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, PS_ACD.Mnemonic, NAME>;
1653
1654    if !ne(!subst("_1k", "", PS_ACD.Mnemonic), PS_ACD.Mnemonic) then
1655    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, !subst("_1k", "", PS_ACD.Mnemonic), NAME>;
1656  }
1657}
1658
1659multiclass VOP3P_Real_MFMA_vi<bits<7> op> {
1660  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
1661            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
1662    let SubtargetPredicate = isGFX8GFX9NotGFX90A;
1663    let AssemblerPredicate = HasMAIInsts;
1664    let DecoderNamespace = "GFX8";
1665    let Constraints = "";
1666  }
1667}
1668
1669multiclass VOP3P_Real_MFMA_vi_gfx90a<bits<7> op> :
1670  VOP3P_Real_MFMA_gfx90a <op>,
1671  VOP3P_Real_MFMA_vi <op>;
1672
1673multiclass VOP3P_Real_MFMA<bits<7> op, string GFX940Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> :
1674  VOP3P_Real_MFMA_vi_gfx90a <op>,
1675  VOP3P_Real_MFMA_gfx940 <op, GFX940Name>;
1676
1677multiclass VOP3P_Real_SMFMAC<bits<7> op, string alias> {
1678  def _gfx940 : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
1679                VOP3Pe_SMFMAC <op> {
1680    let AssemblerPredicate = isGFX940Plus;
1681    let DecoderNamespace = "GFX8";
1682  }
1683  def : MnemonicAlias<alias, !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic>;
1684}
1685
1686let SubtargetPredicate = isGFX8GFX9 in {
1687defm V_PK_MAD_I16 : VOP3P_Real_vi <0x00>;
1688defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x01>;
1689defm V_PK_ADD_I16 : VOP3P_Real_vi <0x02>;
1690defm V_PK_SUB_I16 : VOP3P_Real_vi <0x03>;
1691defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x04>;
1692defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x05>;
1693defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x06>;
1694defm V_PK_MAX_I16 : VOP3P_Real_vi <0x07>;
1695defm V_PK_MIN_I16 : VOP3P_Real_vi <0x08>;
1696defm V_PK_MAD_U16 : VOP3P_Real_vi <0x09>;
1697
1698defm V_PK_ADD_U16 : VOP3P_Real_vi <0x0a>;
1699defm V_PK_SUB_U16 : VOP3P_Real_vi <0x0b>;
1700defm V_PK_MAX_U16 : VOP3P_Real_vi <0x0c>;
1701defm V_PK_MIN_U16 : VOP3P_Real_vi <0x0d>;
1702defm V_PK_FMA_F16 : VOP3P_Real_vi <0x0e>;
1703defm V_PK_ADD_F16 : VOP3P_Real_vi <0x0f>;
1704defm V_PK_MUL_F16 : VOP3P_Real_vi <0x10>;
1705defm V_PK_MIN_F16 : VOP3P_Real_vi <0x11>;
1706defm V_PK_MAX_F16 : VOP3P_Real_vi <0x12>;
1707
1708let OtherPredicates = [HasMadMixInsts] in {
1709defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x20>;
1710defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x21>;
1711defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x22>;
1712}
1713
1714let OtherPredicates = [HasFmaMixInsts],
1715    DecoderNamespace = "GFX9_DL" in {
1716// The mad_mix instructions were renamed and their behaviors changed,
1717// but the opcode stayed the same so we need to put these in a
1718// different DecoderNamespace to avoid the ambiguity.
1719defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x20>;
1720defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x21>;
1721defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x22>;
1722}
1723
1724defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x26>;
1725defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x27>;
1726
1727defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x23>;
1728defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x29>;
1729defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x2b>;
1730
1731defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x28>;
1732defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x2a>;
1733} // End SubtargetPredicate = isGFX8GFX9
1734
1735let OtherPredicates = [HasMAIInsts] in {
1736
1737defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x58>;
1738defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x59>;
1739defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MFMA <0x40, "v_mfma_f32_32x32x1_2b_f32">;
1740defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MFMA <0x41, "v_mfma_f32_16x16x1_4b_f32">;
1741defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MFMA <0x42, "v_mfma_f32_4x4x1_16b_f32">;
1742defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MFMA <0x44, "v_mfma_f32_32x32x2_f32">;
1743defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MFMA <0x45, "v_mfma_f32_16x16x4_f32">;
1744defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MFMA <0x48, "v_mfma_f32_32x32x4_2b_f16">;
1745defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MFMA <0x49, "v_mfma_f32_16x16x4_4b_f16">;
1746defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MFMA <0x4a, "v_mfma_f32_4x4x4_16b_f16">;
1747defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MFMA <0x4c, "v_mfma_f32_32x32x8_f16">;
1748defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MFMA <0x4d, "v_mfma_f32_16x16x16_f16">;
1749defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MFMA <0x50, "v_mfma_i32_32x32x4_2b_i8">;
1750defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MFMA <0x51, "v_mfma_i32_16x16x4_4b_i8">;
1751defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MFMA <0x52, "v_mfma_i32_4x4x4_16b_i8">;
1752
1753defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MFMA_vi_gfx90a <0x55>;
1754defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MFMA_vi_gfx90a <0x54>;
1755defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x68>;
1756defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x69>;
1757defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MFMA_vi_gfx90a <0x6b>;
1758defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6c>;
1759defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6d>;
1760
1761} // End OtherPredicates = [HasMAIInsts]
1762
1763defm V_MFMA_F32_32X32X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x63>;
1764defm V_MFMA_F32_16X16X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x64>;
1765defm V_MFMA_F32_4X4X4BF16_1K    : VOP3P_Real_MFMA_gfx90a <0x65>;
1766defm V_MFMA_F32_32X32X8BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x66>;
1767defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx90a <0x67>;
1768defm V_MFMA_F64_16X16X4F64      : VOP3P_Real_MFMA_gfx90a <0x6e>;
1769defm V_MFMA_F64_4X4X4F64        : VOP3P_Real_MFMA_gfx90a <0x6f>;
1770
1771defm V_MFMA_I32_32X32X16I8       : VOP3P_Real_MFMA_gfx940 <0x56, "v_mfma_i32_32x32x16_i8">;
1772defm V_MFMA_I32_16X16X32I8       : VOP3P_Real_MFMA_gfx940 <0x57, "v_mfma_i32_16x16x32_i8">;
1773defm V_MFMA_F32_16X16X8XF32      : VOP3P_Real_MFMA_gfx940 <0x3e, "v_mfma_f32_16x16x8_xf32">;
1774defm V_MFMA_F32_32X32X4XF32      : VOP3P_Real_MFMA_gfx940 <0x3f, "v_mfma_f32_32x32x4_xf32">;
1775defm V_MFMA_F32_16X16X32_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x70>;
1776defm V_MFMA_F32_16X16X32_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x71>;
1777defm V_MFMA_F32_16X16X32_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x72>;
1778defm V_MFMA_F32_16X16X32_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x73>;
1779defm V_MFMA_F32_32X32X16_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x74>;
1780defm V_MFMA_F32_32X32X16_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x75>;
1781defm V_MFMA_F32_32X32X16_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x76>;
1782defm V_MFMA_F32_32X32X16_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x77>;
1783
1784defm V_MFMA_F32_32X32X4BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x5d, "v_mfma_f32_32x32x4_2b_bf16">;
1785defm V_MFMA_F32_16X16X4BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x5e, "v_mfma_f32_16x16x4_4b_bf16">;
1786defm V_MFMA_F32_4X4X4BF16_1K     : VOP3P_Real_MFMA_gfx940 <0x5f, "v_mfma_f32_4x4x4_16b_bf16">;
1787defm V_MFMA_F32_32X32X8BF16_1K   : VOP3P_Real_MFMA_gfx940 <0x60, "v_mfma_f32_32x32x8_bf16">;
1788defm V_MFMA_F32_16X16X16BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x61, "v_mfma_f32_16x16x16_bf16">;
1789
1790defm V_MFMA_F64_16X16X4F64       : VOP3P_Real_MFMA_gfx940 <0x6e, "v_mfma_f64_16x16x4_f64">;
1791defm V_MFMA_F64_4X4X4F64         : VOP3P_Real_MFMA_gfx940 <0x6f, "v_mfma_f64_4x4x4_4b_f64">;
1792
1793defm V_SMFMAC_F32_16X16X32_F16     : VOP3P_Real_SMFMAC <0x62, "v_smfmac_f32_16x16x32f16">;
1794defm V_SMFMAC_F32_32X32X16_F16     : VOP3P_Real_SMFMAC <0x64, "v_smfmac_f32_32x32x16f16">;
1795defm V_SMFMAC_F32_16X16X32_BF16    : VOP3P_Real_SMFMAC <0x66, "v_smfmac_f32_16x16x32bf16">;
1796defm V_SMFMAC_F32_32X32X16_BF16    : VOP3P_Real_SMFMAC <0x68, "v_smfmac_f32_32x32x16bf16">;
1797defm V_SMFMAC_I32_16X16X64_I8      : VOP3P_Real_SMFMAC <0x6a, "v_smfmac_i32_16x16x64i8">;
1798defm V_SMFMAC_I32_32X32X32_I8      : VOP3P_Real_SMFMAC <0x6c, "v_smfmac_i32_32x32x32i8">;
1799defm V_SMFMAC_F32_16X16X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x78, "v_smfmac_f32_16x16x64bf8bf8">;
1800defm V_SMFMAC_F32_16X16X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x79, "v_smfmac_f32_16x16x64bf8fp8">;
1801defm V_SMFMAC_F32_16X16X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x7a, "v_smfmac_f32_16x16x64fp8bf8">;
1802defm V_SMFMAC_F32_16X16X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x7b, "v_smfmac_f32_16x16x64fp8fp8">;
1803defm V_SMFMAC_F32_32X32X32_BF8_BF8 : VOP3P_Real_SMFMAC <0x7c, "v_smfmac_f32_32x32x32bf8bf8">;
1804defm V_SMFMAC_F32_32X32X32_BF8_FP8 : VOP3P_Real_SMFMAC <0x7d, "v_smfmac_f32_32x32x32bf8fp8">;
1805defm V_SMFMAC_F32_32X32X32_FP8_BF8 : VOP3P_Real_SMFMAC <0x7e, "v_smfmac_f32_32x32x32fp8bf8">;
1806defm V_SMFMAC_F32_32X32X32_FP8_FP8 : VOP3P_Real_SMFMAC <0x7f, "v_smfmac_f32_32x32x32fp8fp8">;
1807
1808defm V_PK_FMA_F32 : VOP3P_Real_vi <0x30>;
1809defm V_PK_MUL_F32 : VOP3P_Real_vi <0x31>;
1810defm V_PK_ADD_F32 : VOP3P_Real_vi <0x32>;
1811defm V_PK_MOV_B32 : VOP3P_Real_vi <0x33>;
1812
1813//===----------------------------------------------------------------------===//
1814// GFX10.
1815//===----------------------------------------------------------------------===//
1816
1817let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1 in {
1818  multiclass VOP3P_Real_gfx10<bits<7> op> {
1819    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
1820                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
1821  }
1822} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1
1823
1824multiclass VOP3P_Real_gfx10_gfx11<bits<7> op> :
1825  VOP3P_Real_gfx10<op>, VOP3P_Real_Base<GFX11Gen, op>;
1826
1827multiclass VOP3P_Real_gfx10_gfx11_gfx12<bits<7> op> :
1828  VOP3P_Real_gfx10_gfx11<op>, VOP3P_Real_Base<GFX12Gen, op>;
1829
1830multiclass VOP3P_Real_gfx10_gfx11_gfx12_Triple<bits<7> op> :
1831  VOP3P_Real_gfx10<op>, VOP3P_Realtriple<GFX11Gen, op>,
1832  VOP3P_Realtriple<GFX12Gen, op>;
1833
1834defm V_PK_MAD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x00>;
1835defm V_PK_MUL_LO_U16  : VOP3P_Real_gfx10_gfx11_gfx12<0x01>;
1836defm V_PK_ADD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x02>;
1837defm V_PK_SUB_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x03>;
1838defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x04>;
1839defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x05>;
1840defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x06>;
1841defm V_PK_MAX_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x07>;
1842defm V_PK_MIN_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x08>;
1843defm V_PK_MAD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x09>;
1844defm V_PK_ADD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0a>;
1845defm V_PK_SUB_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0b>;
1846defm V_PK_MAX_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0c>;
1847defm V_PK_MIN_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0d>;
1848defm V_PK_FMA_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0e>;
1849defm V_PK_ADD_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0f>;
1850defm V_PK_MUL_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x10>;
1851defm V_PK_MIN_F16     : VOP3P_Real_gfx10_gfx11<0x11>;
1852defm V_PK_MAX_F16     : VOP3P_Real_gfx10_gfx11<0x12>;
1853defm V_FMA_MIX_F32    : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x20>;
1854defm V_FMA_MIXLO_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x21>;
1855defm V_FMA_MIXHI_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x22>;
1856
1857defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x14>;
1858defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x15>;
1859
1860defm V_DOT2_F32_F16 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x13>;
1861defm V_DOT4_U32_U8  : VOP3P_Real_gfx10_gfx11_gfx12<0x17>;
1862defm V_DOT8_U32_U4  : VOP3P_Real_gfx10_gfx11_gfx12<0x19>;
1863
1864defm V_DOT4_I32_I8  : VOP3P_Real_gfx10 <0x16>;
1865defm V_DOT8_I32_I4  : VOP3P_Real_gfx10 <0x18>;
1866