//===-- VOP3PInstructions.td - Vector Instruction Definitions -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP3P Classes
//===----------------------------------------------------------------------===//

class VOP3P_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
                    bit HasDPP = 0> : VOP3_Profile<P, Features> {
  let IsVOP3P = 1;
  let HasExtVOP3DPP = HasDPP;
  // We do not want to print src modifiers for vop3p because the bits are
  // overloaded in meaning and the logic in printOperandAndFPInputMods is
  // wrong for vop3p
  let AsmVOP3Base = AsmVOP3P;
}

// Used for FMA_MIX* and MAD_MIX* insts
// Their operands are only sort of f16 operands. Depending on
// op_sel_hi, these may be interpreted as f32. The inline immediate
// values are really f16 converted to f32, so we treat these as f16
// operands.
class VOP3P_Mix_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
                        bit useTiedOutput = 0> : VOP3P_Profile<P, Features, 1> {
    bit UseTiedOutput = useTiedOutput;

    dag srcs =
          (ins FP16InputMods:$src0_modifiers, VCSrc_f16:$src0,
               FP16InputMods:$src1_modifiers, VCSrc_f16:$src1,
               FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);
    dag dpp_srcs =
      (ins FPVRegInputMods:$src0_modifiers, VGPRSrc_32:$src0,
           FPVRegInputMods:$src1_modifiers, VRegSrc_32:$src1,
           FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);

         // FIXME: clampmod0 misbehaves with the non-default vdst_in
         // following it. For now workaround this by requiring clamp
         // in tied patterns. This should use undef_tied_input, but it
         // seems underdeveloped and doesn't apply the right register
         // class constraints.
    dag mods = !con(!if(UseTiedOutput, (ins clampmod:$clamp, VGPR_32:$vdst_in),
                                       (ins clampmod0:$clamp)),
                    (ins op_sel0:$op_sel, op_sel_hi0:$op_sel_hi));
    // We use Ins64 because that is the one which populates InOperandList
    // due to the logic in class VOP3_Pseudo
    let Ins64 = !con(srcs, mods);
    let InsVOP3Base = !con(dpp_srcs, mods);
    let AsmVOP3Base =
      "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
}

multiclass VOP3PInst<string OpName, VOPProfile P,
                     SDPatternOperator node = null_frag, bit IsDOT = 0> {
  def NAME : VOP3P_Pseudo<OpName, P,
                          !if (P.HasModifiers,
                               getVOP3PModPat<P, node, IsDOT, IsDOT>.ret,
                               getVOP3Pat<P, node>.ret)>;
  let SubtargetPredicate = isGFX11Plus in {
    if P.HasExtVOP3DPP then
      def _dpp : VOP3_DPP_Pseudo<OpName, P> {
        let VOP3P = 1;
        let PseudoInstr = OpName #"_dpp";
      }
  } // end SubtargetPredicate = isGFX11Plus
}

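// For example, a typical instantiation such as
//   defm V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, add>;
// yields the VOP3P pseudo and, when the profile enables HasExtVOP3DPP, an
// additional "_dpp" pseudo restricted to GFX11+.
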
// Non-packed instructions that use the VOP3P encoding.
// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
multiclass VOP3_VOP3PInst<string OpName, VOP3P_Mix_Profile P> {
  def NAME : VOP3P_Pseudo<OpName, P> {
    let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
    let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
  }
  let SubtargetPredicate = isGFX11Plus in {
    if P.HasExtVOP3DPP then
      def _dpp : VOP3_DPP_Pseudo<OpName, P> {
        let VOP3P = 1;
        let PseudoInstr = OpName#"_dpp";
        let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
        let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
      }
  } // end SubtargetPredicate = isGFX11Plus
}

let isReMaterializable = 1 in {
let isCommutable = 1 in {
defm V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
defm V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;

let FPDPRounding = 1 in {
defm V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, any_fma>;
defm V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fadd>;
defm V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fmul>;
} // End FPDPRounding = 1
defm V_PK_MAX_F16 : VOP3PInst<"v_pk_max_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fmaxnum_like>;
defm V_PK_MIN_F16 : VOP3PInst<"v_pk_min_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fminnum_like>;

defm V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, add>;
defm V_PK_ADD_I16 : VOP3PInst<"v_pk_add_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
defm V_PK_MUL_LO_U16 : VOP3PInst<"v_pk_mul_lo_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, mul>;

defm V_PK_MIN_I16 : VOP3PInst<"v_pk_min_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smin>;
defm V_PK_MIN_U16 : VOP3PInst<"v_pk_min_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umin>;
defm V_PK_MAX_I16 : VOP3PInst<"v_pk_max_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smax>;
defm V_PK_MAX_U16 : VOP3PInst<"v_pk_max_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umax>;

let SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 in {
defm V_PK_MAXIMUM_F16 : VOP3PInst<"v_pk_maximum_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fmaximum>;
defm V_PK_MINIMUM_F16 : VOP3PInst<"v_pk_minimum_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fminimum>;
} // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0
}

defm V_PK_SUB_U16 : VOP3PInst<"v_pk_sub_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
defm V_PK_SUB_I16 : VOP3PInst<"v_pk_sub_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, sub>;

defm V_PK_LSHLREV_B16 : VOP3PInst<"v_pk_lshlrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshl_rev_16>;
defm V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, cashr_rev_16>;
defm V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshr_rev_16>;
} // End isReMaterializable = 1

let SubtargetPredicate = HasVOP3PInsts in {

// Integer operations with clamp bit set.
class VOP3PSatPat<SDPatternOperator pat, Instruction inst> : GCNPat<
  (pat (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)),
       (v2i16 (VOP3PMods v2i16:$src1, i32:$src1_modifiers))),
  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE)
>;

def : VOP3PSatPat<uaddsat, V_PK_ADD_U16>;
def : VOP3PSatPat<saddsat, V_PK_ADD_I16>;
def : VOP3PSatPat<usubsat, V_PK_SUB_U16>;
def : VOP3PSatPat<ssubsat, V_PK_SUB_I16>;
} // End SubtargetPredicate = HasVOP3PInsts

// TODO: Make sure we're doing the right thing with denormals. Note
// that FMA and MAD will differ.
multiclass MadFmaMixPats<SDPatternOperator fma_like,
                         Instruction mix_inst,
                         Instruction mixlo_inst,
                         Instruction mixhi_inst> {
  // At least one of the operands needs to be an fpextend of an f16
  // for this to be worthwhile, so we need three patterns here.
  // TODO: Could we use a predicate to inspect src1/2/3 instead?
  def : GCNPat <
    (f32 (fma_like (f32 (VOP3PMadMixModsExt f16:$src0, i32:$src0_mods)),
                   (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_mods)),
                   (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_mods)))),
    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
              DSTCLAMP.NONE)>;
  def : GCNPat <
    (f32 (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_mods)),
                   (f32 (VOP3PMadMixModsExt f16:$src1, i32:$src1_mods)),
                   (f32 (VOP3PMadMixMods f32:$src2, i32:$src2_mods)))),
    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
              DSTCLAMP.NONE)>;
  def : GCNPat <
    (f32 (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_mods)),
                   (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_mods)),
                   (f32 (VOP3PMadMixModsExt f16:$src2, i32:$src2_mods)))),
    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
              DSTCLAMP.NONE)>;

  def : GCNPat <
    (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                $src2_modifiers, $src2,
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  // FIXME: Special case handling for mixhi (especially for clamp)
  // because dealing with the write to high half of the register is
  // difficult.
  def : GCNPat <
    (build_vector f16:$elt0, (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                                     (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                                     (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.NONE,
                       VGPR_32:$elt0))
  >;

  def : GCNPat <
    (build_vector
      f16:$elt0,
      (AMDGPUclamp (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                           (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                           (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.ENABLE,
                       VGPR_32:$elt0))
  >;

  def : GCNPat <
    (AMDGPUclamp (build_vector
      (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$lo_src0, i32:$lo_src0_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$lo_src1, i32:$lo_src1_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$lo_src2, i32:$lo_src2_modifiers))))),
      (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$hi_src0, i32:$hi_src0_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$hi_src1, i32:$hi_src1_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$hi_src2, i32:$hi_src2_modifiers))))))),
    (v2f16 (mixhi_inst $hi_src0_modifiers, $hi_src0,
                       $hi_src1_modifiers, $hi_src1,
                       $hi_src2_modifiers, $hi_src2,
                       DSTCLAMP.ENABLE,
                       (mixlo_inst $lo_src0_modifiers, $lo_src0,
                                   $lo_src1_modifiers, $lo_src1,
                                   $lo_src2_modifiers, $lo_src2,
                                   DSTCLAMP.ENABLE,
                                   (i32 (IMPLICIT_DEF)))))
  >;

  def : GCNPat <
    (f16 (fpround (fmul (f32 (VOP3PMadMixMods f32:$src0, i32:$src0_modifiers)),
                        (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                (i32 0), (i32 0),
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  def : GCNPat <
    (build_vector f16:$elt0, (f16 (fpround (fmul (f32 (VOP3PMadMixMods f32:$src0, i32:$src0_modifiers)),
                                                 (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       (i32 0), (i32 0),
                       DSTCLAMP.NONE,
                       VGPR_32:$elt0))
  >;
}

let SubtargetPredicate = HasMadMixInsts, OtherPredicates = [NoFP32Denormals] in {

// These are VOP3a-like opcodes which accept no omod.
// Size of src arguments (16/32) is controlled by op_sel.
// For 16-bit src arguments their location (hi/lo) is controlled by op_sel_hi.
let isCommutable = 1, mayRaiseFPException = 0 in {
let isReMaterializable = 1 in
defm V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
defm V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;

let ClampLo = 0, ClampHi = 1 in {
defm V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
}
} // End FPDPRounding = 1
}

defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
} // End SubtargetPredicate = HasMadMixInsts, OtherPredicates = [NoFP32Denormals]


// Essentially the same as the mad_mix versions
let SubtargetPredicate = HasFmaMixInsts in {
let isCommutable = 1 in {

let isReMaterializable = 1 in
defm V_FMA_MIX_F32 : VOP3_VOP3PInst<"v_fma_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
defm V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;

let ClampLo = 0, ClampHi = 1 in {
defm V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_fma_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
}
} // End FPDPRounding = 1
}

defm : MadFmaMixPats<fma, V_FMA_MIX_F32, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
}

// Defines patterns that extract a signed 4-bit value starting at bit Idx[0].
foreach Idx = [[0,28],[4,24],[8,20],[12,16],[16,12],[20,8],[24,4]] in
  def ExtractSigned4bit_#Idx[0] : PatFrag<(ops node:$src),
                                          (sra (shl node:$src, (i32 Idx[1])), (i32 28))>;

// Defines a pattern that extracts an unsigned (U = 1) or signed (U = 0)
// 4/8-bit value starting at FromBitIndex.
class Extract<int FromBitIndex, int BitMask, bit U>: PatFrag<
  (ops node:$src),
  !if (!or (!and (!eq (BitMask, 255), !eq (FromBitIndex, 24)), !eq (FromBitIndex, 28)), // last element
       !if (U, (srl node:$src, (i32 FromBitIndex)), (sra node:$src, (i32 FromBitIndex))),
       !if (!eq (FromBitIndex, 0), // first element
            !if (U, (and node:$src, (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                                         (sext_inreg node:$src, i8))),
            !if (U, (and (srl node:$src, (i32 FromBitIndex)), (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                                         (sext_inreg (srl node:$src, (i32 FromBitIndex)), i8)))))>;


foreach Type = ["I", "U"] in
  foreach Index = 0-3 in {
    // Defines patterns that extract each Index'ed 8-bit lane from an unsigned
    // 32-bit scalar value.
    def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !eq (Type, "U")>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 8-bit lane of a 32-bit scalar value.

    def Mul#Type#_Elt#Index : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), AMDGPUmul_i24_oneuse, AMDGPUmul_u24_oneuse))
                             (!cast<Extract>(Type#Index#"_8bit") node:$src0),
                             (!cast<Extract>(Type#Index#"_8bit") node:$src1))>;
  }

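// For reference, a couple of representative expansions of Extract:
//   I0_8bit -> (sext_inreg node:$src, i8)
//   U1_8bit -> (and (srl node:$src, (i32 8)), (i32 255))
//   I3_8bit -> (sra node:$src, (i32 24))
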
// Different variants of dot8 patterns cause a huge increase in the compile time.
// Define non-associative/commutative add/mul to prevent permutation in the dot8
// pattern.
def NonACAdd        : SDNode<"ISD::ADD"       , SDTIntBinOp>;
def NonACAdd_oneuse : HasOneUseBinOp<NonACAdd>;

def NonACAMDGPUmul_u24        : SDNode<"AMDGPUISD::MUL_U24"   , SDTIntBinOp>;
def NonACAMDGPUmul_u24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_u24>;

def NonACAMDGPUmul_i24        : SDNode<"AMDGPUISD::MUL_I24"   , SDTIntBinOp>;
def NonACAMDGPUmul_i24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_i24>;

foreach Type = ["I", "U"] in
  foreach Index = 0-7 in {
    // Defines patterns that extract each Index'ed 4-bit lane from an unsigned
    // 32-bit scalar value.
    def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !eq (Type, "U")>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 4-bit lane of a 32-bit scalar value.
    def Mul#Type#Index#"_4bit" : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), NonACAMDGPUmul_i24_oneuse, NonACAMDGPUmul_u24_oneuse))
                             (!cast<Extract>(Type#Index#"_4bit") node:$src0),
                             (!cast<Extract>(Type#Index#"_4bit") node:$src1))>;
  }

class UDot2Pat<VOP_Pseudo Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
                                         (srl i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
                             (and i32:$src1, (i32 65535)))
   ),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let Predicates = Inst.Predicates;
}

class SDot2Pat<VOP_Pseudo Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
                                         (sra i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
                             (sext_inreg i32:$src1, i16))),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let Predicates = Inst.Predicates;
}

let IsDOT = 1 in {
let OtherPredicates = [HasDot2Insts] in {
defm V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_sdot2, 1>;
defm V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16",
  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_udot2, 1>;
} // End OtherPredicates = [HasDot2Insts]

let OtherPredicates = [HasDot10Insts] in
defm V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
  VOP3P_Profile<VOP_F32_V2F16_V2F16_F32, VOP3_REGULAR, /*HasDPP*/ 1>,
  AMDGPUfdot2, 1/*ExplicitClamp*/>;

let OtherPredicates = [HasDot7Insts] in {
defm V_DOT4_U32_U8  : VOP3PInst<"v_dot4_u32_u8",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
defm V_DOT8_U32_U4  : VOP3PInst<"v_dot8_u32_u4",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;
} // End OtherPredicates = [HasDot7Insts]

let OtherPredicates = [HasDot1Insts] in {
defm V_DOT4_I32_I8  : VOP3PInst<"v_dot4_i32_i8",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
defm V_DOT8_I32_I4  : VOP3PInst<"v_dot8_i32_i4",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;
} // End OtherPredicates = [HasDot1Insts]

def DOT2_BF16_Profile
  : VOP3P_Profile<VOP_F32_V2I16_V2I16_F32, VOP3_REGULAR, /*HasDPP*/ 1> {
  let HasSrc1Mods = 1;
}

let SubtargetPredicate = HasDot9Insts in {

defm V_DOT2_F32_BF16 : VOP3PInst<"v_dot2_f32_bf16", DOT2_BF16_Profile,
  int_amdgcn_fdot2_f32_bf16, 1>;

} // End SubtargetPredicate = HasDot9Insts

} // End let IsDOT = 1

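// The dot instructions above accumulate a packed dot product into src2, e.g.
// v_dot2_u32_u16 computes roughly
//   dst = src2 + (src0.lo16 * src1.lo16) + (src0.hi16 * src1.hi16),
// which is the expression matched by UDot2Pat/SDot2Pat.
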
multiclass VOP3PDOTIUInst <string OpName, SDPatternOperator intrinsic_node> {
  let IsDOT = 1 in
  defm NAME : VOP3PInst<OpName, VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>,
                        null_frag, 1>;
  // Dot-iu instructions consider an input as signed if its imod neg bits are
  // set. Thus the dot-iu intrinsics have extra operands and require a separate
  // codegen pattern.
  def : GCNPat < (intrinsic_node (DotIUVOP3PMods i32:$src0_mods), i32:$src0,
                                 (DotIUVOP3PMods i32:$src1_mods), i32:$src1,
                                 i32:$src2, (i1 timm:$clamp)),
                 (!cast<Instruction>(NAME) $src0_mods, i32:$src0,
                                           $src1_mods, i32:$src1,
                                           (i32 8), i32:$src2, i1:$clamp)
  >;
}

let SubtargetPredicate = HasDot8Insts in {
defm V_DOT4_I32_IU8 : VOP3PDOTIUInst<"v_dot4_i32_iu8", int_amdgcn_sudot4>;
defm V_DOT8_I32_IU4 : VOP3PDOTIUInst<"v_dot8_i32_iu4", int_amdgcn_sudot8>;

def : GCNPat < (int_amdgcn_sdot8 i32:$src0,
                                 i32:$src1,
                                 i32:$src2, (i1 timm:$clamp)),
               (V_DOT8_I32_IU4  (i32 9), i32:$src0,
                                (i32 9), i32:$src1, (i32 8), i32:$src2, i1:$clamp)
>;

def : GCNPat < (int_amdgcn_sdot4 i32:$src0,
                                 i32:$src1,
                                 i32:$src2, (i1 timm:$clamp)),
               (V_DOT4_I32_IU8  (i32 9), i32:$src0,
                                (i32 9), i32:$src1, (i32 8), i32:$src2, i1:$clamp)
>;
} // End SubtargetPredicate = HasDot8Insts

def : UDot2Pat<V_DOT2_U32_U16>;
def : SDot2Pat<V_DOT2_I32_I16>;

foreach Type = ["U", "I"] in
  let Predicates = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).Predicates in
  def : GCNPat <
    !cast<dag>(!foldl((i32 i32:$src2), [0, 1, 2, 3], lhs, y,
                      (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

foreach Type = ["U", "I"] in
  let Predicates = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).Predicates in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [1, 2, 3, 4, 5, 6, 7], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

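// The !foldl above simply unrolls the accumulation, so the dot4 pattern ends up
// matching roughly
//   src2 + mul(byte0(src0), byte0(src1)) + ... + mul(byte3(src0), byte3(src1)),
// and the dot8 pattern the analogous sum over eight 4-bit lanes.
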
// Different variants of dot8 code-gen dag patterns are not generated through table-gen due to a huge increase
// in the compile time. Directly handle the pattern generated by the FE here.
foreach Type = ["U", "I"] in
  let Predicates = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).Predicates in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [7, 1, 2, 3, 4, 5, 6], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

def ADst_32   : VOPDstOperand<AGPR_32>;
def ADst_64   : VOPDstOperand<AReg_64>;
def ADst_128  : VOPDstOperand<AReg_128>;
def ADst_256  : VOPDstOperand<AReg_256>;
def ADst_512  : VOPDstOperand<AReg_512>;
def ADst_1024 : VOPDstOperand<AReg_1024>;
def VDst_64   : VOPDstOperand<VReg_64>;
def VDst_128  : VOPDstOperand<VReg_128>;
def VDst_256  : VOPDstOperand<VReg_256>;
def VDst_512  : VOPDstOperand<VReg_512>;
def VDst_1024 : VOPDstOperand<VReg_1024>;

def VOPProfileAccRead : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> {
  let Src0RC64 = ARegSrc_32;
}

def VOPProfileAccWrite : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> {
  let DstRC = ADst_32;
  let Src0RC64 = VCSrc_b32;
}

class VOPProfileMAI<VOPProfile P, RegisterOperand _SrcRC, RegisterOperand _DstRC,
                    RegisterOperand SrcABRC = AVSrc_32>
  : VOP3P_Profile<P, VOP3_MAI> {
  let DstRC = _DstRC;
  let Src0RC64 = SrcABRC;
  let Src1RC64 = SrcABRC;
  let Src2RC64 = _SrcRC;
  let HasOpSel = 0;
  let HasClamp = 0;
  let HasIntClamp = 0;
  let HasOMod = 0;
  let HasModifiers = 0;
  let AsmVOP3Base = "$vdst, $src0, $src1, $src2$cbsz$abid$blgp";
  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, cbsz:$cbsz, abid:$abid, blgp:$blgp);
  let InsVOP3Base = Ins64;
  // Dst and SrcC cannot partially overlap if SrcC/Dst is bigger than 4 VGPRs.
  // We then create two versions of the instruction: with tied dst and src2
  // and with the earlyclobber flag on the dst. This is stricter than the
  // actual HW restriction. In particular earlyclobber also affects src0 and
  // src1 allocation which is not required.
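  // For example, the 128-bit (4-VGPR) accumulator profiles keep NoDstOverlap
  // clear, while the 256-bit and larger profiles set it below.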
  bit NoDstOverlap = !gt(DstVT.Size, 128);
}

class VOPProfileSMFMAC<VOPProfile P, RegisterOperand _DstRC,
                       RegisterOperand _SrcARC, RegisterOperand _SrcBRC>
  : VOPProfileMAI<P, _DstRC, _DstRC, _SrcARC> {
  let Src1RC64 = _SrcBRC;
  let Src2VT = DstVT;
  let Asm64 = " $vdst, $src0, $src1, $idx$cbsz$abid";
  let Outs64 = (outs DstRC:$vdst);
  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, VRegSrc_32:$idx, cbsz:$cbsz, abid:$abid, Src2RC64:$src2);
}

def VOPProfileMAI_F32_F32_X4    : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32,       AISrc_128_f32,  ADst_128>;
def VOPProfileMAI_F32_F32_X16   : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32,     AISrc_512_f32,  ADst_512>;
def VOPProfileMAI_F32_F32_X32   : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32,     AISrc_1024_f32, ADst_1024>;
def VOPProfileMAI_I32_I32_X4    : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32,       AISrc_128_b32,  ADst_128>;
def VOPProfileMAI_I32_I32_X16   : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32,     AISrc_512_b32,  ADst_512>;
def VOPProfileMAI_I32_I32_X32   : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32,     AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V2I16_X4  : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32,   AISrc_128_b32,  ADst_128>;
def VOPProfileMAI_F32_V2I16_X16 : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, AISrc_512_b32,  ADst_512>;
def VOPProfileMAI_F32_V2I16_X32 : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V4F16_X4  : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32,   AISrc_128_b32,  ADst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X16 : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, AISrc_512_b32,  ADst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X32 : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X4  : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32,   AISrc_128_b32,  ADst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X16 : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, AISrc_512_b32,  ADst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X32 : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;
def VOPProfileMAI_F64_16X16X4F64 : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64,      AISrc_256_f64,  ADst_256,  AVSrc_64>;
def VOPProfileMAI_F64_4X4X4F64   : VOPProfileMAI<VOP_F64_F64_F64_F64,          AISrc_64_f64,   ADst_64,   AVSrc_64>;
def VOPProfileMAI_I32_I64_X16    : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32,      AISrc_128_b32,  ADst_128,  AVSrc_64>;
def VOPProfileMAI_I32_I64_X32    : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32,    AISrc_512_b32,  ADst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X16  : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32,  AISrc_128_b32,  ADst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X32  : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, AISrc_512_b32, ADst_512,  AVSrc_64>;
def VOPProfileMAI_F32_I64_X32    : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32,      AISrc_128_b32,  ADst_128,  AVSrc_64>;
def VOPProfileMAI_F32_I64_X16    : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32,    AISrc_512_b32,  ADst_512,  AVSrc_64>;

def VOPProfileMAI_F32_F32_X4_VCD   : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32,       VISrc_128_f32,  VDst_128>;
def VOPProfileMAI_F32_F32_X16_VCD  : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32,     VISrc_512_f32,  VDst_512>;
def VOPProfileMAI_F32_F32_X32_VCD  : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32,     VISrc_1024_f32, VDst_1024>;
def VOPProfileMAI_I32_I32_X4_VCD   : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32,       VISrc_128_b32,  VDst_128>;
def VOPProfileMAI_I32_I32_X16_VCD  : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32,     VISrc_512_b32,  VDst_512>;
def VOPProfileMAI_I32_I32_X32_VCD  : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32,     VISrc_1024_b32, VDst_1024>;
def VOPProfileMAI_F32_V2I16_X4_VCD : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32,   VISrc_128_b32,  VDst_128>;
def VOPProfileMAI_F32_V2I16_X16_VCD : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, VISrc_512_b32, VDst_512>;
def VOPProfileMAI_F32_V2I16_X32_VCD : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, VISrc_1024_b32, VDst_1024>;
def VOPProfileMAI_F32_V4F16_X4_VCD  : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32,   VISrc_128_b32,  VDst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X16_VCD : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, VISrc_512_b32,  VDst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X32_VCD : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X4_VCD  : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32,   VISrc_128_b32,  VDst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X16_VCD : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, VISrc_512_b32,  VDst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X32_VCD : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>;
def VOPProfileMAI_F64_16X16X4F64_VCD : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64,      VISrc_256_f64,  VDst_256,  AVSrc_64>;
def VOPProfileMAI_F64_4X4X4F64_VCD   : VOPProfileMAI<VOP_F64_F64_F64_F64,          VISrc_64_f64,   VDst_64,   AVSrc_64>;
def VOPProfileMAI_I32_I64_X16_VCD    : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32,      VISrc_128_b32,  VDst_128,  AVSrc_64>;
def VOPProfileMAI_I32_I64_X32_VCD    : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32,    VISrc_512_b32,  VDst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X16_VCD  : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32,  VISrc_128_b32,  VDst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X32_VCD  : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, VISrc_512_b32, VDst_512,  AVSrc_64>;
def VOPProfileMAI_F32_I64_X32_VCD    : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32,      VISrc_128_b32,  VDst_128,  AVSrc_64>;
def VOPProfileMAI_F32_I64_X16_VCD    : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32,    VISrc_512_b32,  VDst_512,  AVSrc_64>;

def VOPProfileSMFMAC_F32_16X16X32_F16 : VOPProfileSMFMAC<VOP_V4F32_V4F16_V8F16_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_32X32X16_F16 : VOPProfileSMFMAC<VOP_V16F32_V4F16_V8F16_I32, AVDst_512, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_16X16X32_I16 : VOPProfileSMFMAC<VOP_V4F32_V4I16_V8I16_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_32X32X16_I16 : VOPProfileSMFMAC<VOP_V16F32_V4I16_V8I16_I32, AVDst_512, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_I32_16X16X64_I8  : VOPProfileSMFMAC<VOP_V4I32_V2I32_V4I32_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_I32_32X32X32_I8  : VOPProfileSMFMAC<VOP_V16I32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_16X16X64_F8  : VOPProfileSMFMAC<VOP_V4F32_V2I32_V4I32_I32,  AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_32X32X32_F8  : VOPProfileSMFMAC<VOP_V16F32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>;

class MFMATable <bit is_mac, string Name> {
  bit IsMac = is_mac;
  string FMAOp = Name;
}

class MAIFrag<SDPatternOperator Op, code pred> : PatFrag <
  (ops node:$src0, node:$src1, node:$src2, node:$cbsz, node:$abid, node:$blgp),
  (Op $src0, $src1, $src2, $cbsz, $abid, $blgp),
  pred
>;

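// The two MAIFrag flavours below gate instruction selection on whether the
// function may need AGPRs: AgprMAIFrag is used for the ACC-destination MFMA
// forms and VgprMAIFrag for the VGPR-destination (_vgprcd) forms.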
let GISelPredicateCode = [{ return MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }] in
class AgprMAIFrag<SDPatternOperator Op> :
  MAIFrag<Op, [{ return MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }]>;

let GISelPredicateCode = [{ return !MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }] in
class VgprMAIFrag<SDPatternOperator Op> :
  MAIFrag<Op, [{ return !MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }]>;

let SubtargetPredicate = HasMAIInsts in {

let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
  defm V_ACCVGPR_READ_B32  : VOP3Inst<"v_accvgpr_read_b32",  VOPProfileAccRead>;
  let isMoveImm = 1 in {
    defm V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite>;
  } // End isMoveImm = 1
} // End isAsCheapAsAMove = 1, isReMaterializable = 1

class MAIInst<string OpName, VOPProfile P, SDPatternOperator node>
  : VOP3InstBase<OpName, P, node> {
  Instruction Opcode = !cast<Instruction>(NAME);
  bit is_dgemm = 0;
  bit is_gfx940_xdl = 0;
}

multiclass MAIInst<string OpName, string P, SDPatternOperator node,
                   bit NoDstOverlap = !cast<VOPProfileMAI>("VOPProfileMAI_" # P).NoDstOverlap> {
  let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
    // FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
    let Constraints = !if(NoDstOverlap, "@earlyclobber $vdst", "") in {
      def _e64 : MAIInst<OpName, !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
                         !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, AgprMAIFrag<node>)>,
                 MFMATable<0, NAME # "_e64">;

      let SubtargetPredicate = isGFX90APlus, Mnemonic = OpName in
      def _vgprcd_e64 : MAIInst<OpName # "_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
                                !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, VgprMAIFrag<node>)>,
                        MFMATable<0, NAME # "_vgprcd_e64">;
    }

    if NoDstOverlap then {
      let Constraints = !if(NoDstOverlap, "$vdst = $src2", ""),
          isConvertibleToThreeAddress = NoDstOverlap,
          Mnemonic = OpName in {
        def "_mac_e64" : MAIInst<OpName # "_mac", !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
                                 !if(!eq(node, null_frag), null_frag, AgprMAIFrag<node>)>,
                         MFMATable<1, NAME # "_e64">;

        let SubtargetPredicate = isGFX90APlus in
        def _mac_vgprcd_e64 : MAIInst<OpName # "_mac_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
                                      !if(!eq(node, null_frag), null_frag, VgprMAIFrag<node>)>,
                              MFMATable<1, NAME # "_vgprcd_e64">;
      }
    }
  } // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1
}

defm V_MFMA_F32_4X4X1F32    : MAIInst<"v_mfma_f32_4x4x1f32",    "F32_F32_X4",    int_amdgcn_mfma_f32_4x4x1f32>;
defm V_MFMA_F32_16X16X1F32  : MAIInst<"v_mfma_f32_16x16x1f32",  "F32_F32_X16",   int_amdgcn_mfma_f32_16x16x1f32>;
defm V_MFMA_F32_16X16X4F32  : MAIInst<"v_mfma_f32_16x16x4f32",  "F32_F32_X4",    int_amdgcn_mfma_f32_16x16x4f32>;
defm V_MFMA_F32_32X32X1F32  : MAIInst<"v_mfma_f32_32x32x1f32",  "F32_F32_X32",   int_amdgcn_mfma_f32_32x32x1f32>;
defm V_MFMA_F32_32X32X2F32  : MAIInst<"v_mfma_f32_32x32x2f32",  "F32_F32_X16",   int_amdgcn_mfma_f32_32x32x2f32>;

let is_gfx940_xdl = 1 in {
defm V_MFMA_F32_4X4X4F16    : MAIInst<"v_mfma_f32_4x4x4f16",    "F32_V4F16_X4",  int_amdgcn_mfma_f32_4x4x4f16>;
defm V_MFMA_I32_4X4X4I8     : MAIInst<"v_mfma_i32_4x4x4i8",     "I32_I32_X4",    int_amdgcn_mfma_i32_4x4x4i8>;
defm V_MFMA_F32_16X16X4F16  : MAIInst<"v_mfma_f32_16x16x4f16",  "F32_V4F16_X16", int_amdgcn_mfma_f32_16x16x4f16>;
defm V_MFMA_F32_16X16X16F16 : MAIInst<"v_mfma_f32_16x16x16f16", "F32_V4F16_X4",  int_amdgcn_mfma_f32_16x16x16f16>;
MAIInst<"v_mfma_i32_16x16x4i8", "I32_I32_X16", int_amdgcn_mfma_i32_16x16x4i8>; 664defm V_MFMA_F32_32X32X4F16 : MAIInst<"v_mfma_f32_32x32x4f16", "F32_V4F16_X32", int_amdgcn_mfma_f32_32x32x4f16>; 665defm V_MFMA_F32_32X32X8F16 : MAIInst<"v_mfma_f32_32x32x8f16", "F32_V4F16_X16", int_amdgcn_mfma_f32_32x32x8f16>; 666defm V_MFMA_I32_32X32X4I8 : MAIInst<"v_mfma_i32_32x32x4i8", "I32_I32_X32", int_amdgcn_mfma_i32_32x32x4i8>; 667} 668 669let Predicates = [isGFX908orGFX90A] in { 670defm V_MFMA_I32_16X16X16I8 : MAIInst<"v_mfma_i32_16x16x16i8", "I32_I32_X4", int_amdgcn_mfma_i32_16x16x16i8>; 671defm V_MFMA_I32_32X32X8I8 : MAIInst<"v_mfma_i32_32x32x8i8", "I32_I32_X16", int_amdgcn_mfma_i32_32x32x8i8>; 672defm V_MFMA_F32_4X4X2BF16 : MAIInst<"v_mfma_f32_4x4x2bf16", "F32_V2I16_X4", int_amdgcn_mfma_f32_4x4x2bf16>; 673defm V_MFMA_F32_16X16X2BF16 : MAIInst<"v_mfma_f32_16x16x2bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_16x16x2bf16>; 674defm V_MFMA_F32_16X16X8BF16 : MAIInst<"v_mfma_f32_16x16x8bf16", "F32_V2I16_X4", int_amdgcn_mfma_f32_16x16x8bf16>; 675defm V_MFMA_F32_32X32X2BF16 : MAIInst<"v_mfma_f32_32x32x2bf16", "F32_V2I16_X32", int_amdgcn_mfma_f32_32x32x2bf16>; 676defm V_MFMA_F32_32X32X4BF16 : MAIInst<"v_mfma_f32_32x32x4bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_32x32x4bf16>; 677} 678 679} // End SubtargetPredicate = HasMAIInsts 680 681let Predicates = [isGFX90APlus] in { 682 let is_gfx940_xdl = 1 in { 683 defm V_MFMA_F32_32X32X4BF16_1K : MAIInst<"v_mfma_f32_32x32x4bf16_1k", "F32_V4I16_X32", int_amdgcn_mfma_f32_32x32x4bf16_1k>; 684 defm V_MFMA_F32_16X16X4BF16_1K : MAIInst<"v_mfma_f32_16x16x4bf16_1k", "F32_V4I16_X16", int_amdgcn_mfma_f32_16x16x4bf16_1k>; 685 defm V_MFMA_F32_4X4X4BF16_1K : MAIInst<"v_mfma_f32_4x4x4bf16_1k", "F32_V4I16_X4", int_amdgcn_mfma_f32_4x4x4bf16_1k>; 686 defm V_MFMA_F32_32X32X8BF16_1K : MAIInst<"v_mfma_f32_32x32x8bf16_1k", "F32_V4I16_X16", int_amdgcn_mfma_f32_32x32x8bf16_1k>; 687 defm V_MFMA_F32_16X16X16BF16_1K : MAIInst<"v_mfma_f32_16x16x16bf16_1k", "F32_V4I16_X4", int_amdgcn_mfma_f32_16x16x16bf16_1k>; 688 } 689 690 let is_dgemm = 1 in { 691 defm V_MFMA_F64_16X16X4F64 : MAIInst<"v_mfma_f64_16x16x4f64", "F64_16X16X4F64", int_amdgcn_mfma_f64_16x16x4f64>; 692 defm V_MFMA_F64_4X4X4F64 : MAIInst<"v_mfma_f64_4x4x4f64", "F64_4X4X4F64", int_amdgcn_mfma_f64_4x4x4f64>; 693 } 694} // End Predicates = [isGFX90APlus] 695 696let SubtargetPredicate = isGFX940Plus, is_gfx940_xdl = 1 in { 697 defm V_MFMA_I32_32X32X16I8 : MAIInst<"v_mfma_i32_32x32x16i8", "I32_I64_X32", int_amdgcn_mfma_i32_32x32x16_i8>; 698 defm V_MFMA_I32_16X16X32I8 : MAIInst<"v_mfma_i32_16x16x32i8", "I32_I64_X16", int_amdgcn_mfma_i32_16x16x32_i8>; 699 defm V_MFMA_F32_16X16X8XF32 : MAIInst<"v_mfma_f32_16x16x8xf32", "F32_V2F32_X16", int_amdgcn_mfma_f32_16x16x8_xf32>; 700 defm V_MFMA_F32_32X32X4XF32 : MAIInst<"v_mfma_f32_32x32x4xf32", "F32_V2F32_X32", int_amdgcn_mfma_f32_32x32x4_xf32>; 701 defm V_MFMA_F32_16X16X32_BF8_BF8 : MAIInst<"v_mfma_f32_16x16x32_bf8_bf8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_bf8_bf8>; 702 defm V_MFMA_F32_16X16X32_BF8_FP8 : MAIInst<"v_mfma_f32_16x16x32_bf8_fp8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_bf8_fp8>; 703 defm V_MFMA_F32_16X16X32_FP8_BF8 : MAIInst<"v_mfma_f32_16x16x32_fp8_bf8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_fp8_bf8>; 704 defm V_MFMA_F32_16X16X32_FP8_FP8 : MAIInst<"v_mfma_f32_16x16x32_fp8_fp8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_fp8_fp8>; 705 defm V_MFMA_F32_32X32X16_BF8_BF8 : MAIInst<"v_mfma_f32_32x32x16_bf8_bf8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_bf8_bf8>; 706 
  defm V_MFMA_F32_32X32X16_BF8_FP8 : MAIInst<"v_mfma_f32_32x32x16_bf8_fp8", "F32_I64_X16",   int_amdgcn_mfma_f32_32x32x16_bf8_fp8>;
  defm V_MFMA_F32_32X32X16_FP8_BF8 : MAIInst<"v_mfma_f32_32x32x16_fp8_bf8", "F32_I64_X16",   int_amdgcn_mfma_f32_32x32x16_fp8_bf8>;
  defm V_MFMA_F32_32X32X16_FP8_FP8 : MAIInst<"v_mfma_f32_32x32x16_fp8_fp8", "F32_I64_X16",   int_amdgcn_mfma_f32_32x32x16_fp8_fp8>;
} // End SubtargetPredicate = isGFX940Plus, is_gfx940_xdl = 1

multiclass SMFMACInst<string OpName, string P, SDPatternOperator node> {
  let Constraints = "$vdst = $src2", DisableEncoding = "$src2",
      isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1, is_gfx940_xdl = 1 in {
    def _e64 : MAIInst<OpName, !cast<VOPProfileSMFMAC>("VOPProfileSMFMAC_" # P), node>;
  }
}

let SubtargetPredicate = isGFX940Plus in {
defm V_SMFMAC_F32_16X16X32_F16     : SMFMACInst<"v_smfmac_f32_16x16x32_f16",     "F32_16X16X32_F16", int_amdgcn_smfmac_f32_16x16x32_f16>;
defm V_SMFMAC_F32_32X32X16_F16     : SMFMACInst<"v_smfmac_f32_32x32x16_f16",     "F32_32X32X16_F16", int_amdgcn_smfmac_f32_32x32x16_f16>;
defm V_SMFMAC_F32_16X16X32_BF16    : SMFMACInst<"v_smfmac_f32_16x16x32_bf16",    "F32_16X16X32_I16", int_amdgcn_smfmac_f32_16x16x32_bf16>;
defm V_SMFMAC_F32_32X32X16_BF16    : SMFMACInst<"v_smfmac_f32_32x32x16_bf16",    "F32_32X32X16_I16", int_amdgcn_smfmac_f32_32x32x16_bf16>;
defm V_SMFMAC_I32_16X16X64_I8      : SMFMACInst<"v_smfmac_i32_16x16x64_i8",      "I32_16X16X64_I8",  int_amdgcn_smfmac_i32_16x16x64_i8>;
defm V_SMFMAC_I32_32X32X32_I8      : SMFMACInst<"v_smfmac_i32_32x32x32_i8",      "I32_32X32X32_I8",  int_amdgcn_smfmac_i32_32x32x32_i8>;
defm V_SMFMAC_F32_16X16X64_BF8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_bf8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_bf8_bf8>;
defm V_SMFMAC_F32_16X16X64_BF8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_fp8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_bf8_fp8>;
defm V_SMFMAC_F32_16X16X64_FP8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_bf8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_fp8_bf8>;
defm V_SMFMAC_F32_16X16X64_FP8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_fp8", "F32_16X16X64_F8",  int_amdgcn_smfmac_f32_16x16x64_fp8_fp8>;
defm V_SMFMAC_F32_32X32X32_BF8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_bf8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_bf8_bf8>;
defm V_SMFMAC_F32_32X32X32_BF8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_fp8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_bf8_fp8>;
defm V_SMFMAC_F32_32X32X32_FP8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_bf8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_fp8_bf8>;
defm V_SMFMAC_F32_32X32X32_FP8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_fp8", "F32_32X32X32_F8",  int_amdgcn_smfmac_f32_32x32x32_fp8_fp8>;
}

def MAIInstInfoTable : GenericTable {
  let FilterClass = "MAIInst";
  let CppTypeName = "MAIInstInfo";
  let Fields = [
    "Opcode", "is_dgemm", "is_gfx940_xdl"
  ];

  let PrimaryKey = ["Opcode"];
  let PrimaryKeyName = "getMAIInstInfoHelper";
}

let isCommutable = 1, isReMaterializable = 1 in {
  let SubtargetPredicate = HasPackedFP32Ops in {
    defm V_PK_FMA_F32 : VOP3PInst<"v_pk_fma_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fma>;
    defm V_PK_MUL_F32 : VOP3PInst<"v_pk_mul_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fmul>;
    defm V_PK_ADD_F32 : VOP3PInst<"v_pk_add_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fadd>;
  } // End SubtargetPredicate = HasPackedFP32Ops

  let SubtargetPredicate = HasPkMovB32 in
  defm V_PK_MOV_B32 : VOP3PInst<"v_pk_mov_b32", VOP3P_Profile<VOP_V2I32_V2I32_V2I32, VOP3_PACKED>>;
} // End isCommutable = 1, isReMaterializable = 1

def : MnemonicAlias<"v_accvgpr_read",  "v_accvgpr_read_b32">;
def : MnemonicAlias<"v_accvgpr_write", "v_accvgpr_write_b32">;

class VOPProfileWMMA<VOPProfile P, string Suffix, RegisterOperand _Src01RC64, bit _HasClamp, bit _HasOpSel> : VOP3P_Profile<P> {
  let DstRC = !if(!eq(Suffix, "_w32"), VDst_256, VDst_128);
  let Src0RC64 = _Src01RC64;
  let Src1RC64 = _Src01RC64;
  let Src2RC64 = !if(!eq(Suffix, "_w32"), VISrc_256_f64, VISrc_128_f32);
  let HasClamp = _HasClamp;
  let HasOpSel = _HasOpSel;
  let IsPacked = 1;
  let IsWMMA = 1;
}

def VOP_V8F32_V16F16_V16F16_V8F32 : VOPProfile <[v8f32, v16f16, v16f16, v8f32]>;
def VOP_V8F32_V16I16_V16I16_V8F32 : VOPProfile <[v8f32, v16i16, v16i16, v8f32]>;
def VOP_V16F16_V16F16_V16F16_V16F16 : VOPProfile <[v16f16, v16f16, v16f16, v16f16]>;
def VOP_V16I16_V16I16_V16I16_V16I16 : VOPProfile <[v16i16, v16i16, v16i16, v16i16]>;
def VOP_V8I32_V4I32_V4I32_V8I32 : VOPProfile <[v8i32, v4i32, v4i32, v8i32]>;
def VOP_V8I32_V2I32_V2I32_V8I32 : VOPProfile <[v8i32, v2i32, v2i32, v8i32]>;

def VOP_V4F32_V16F16_V16F16_V4F32 : VOPProfile <[v4f32, v16f16, v16f16, v4f32]>;
def VOP_V4F32_V16I16_V16I16_V4F32 : VOPProfile <[v4f32, v16i16, v16i16, v4f32]>;
def VOP_V8F16_V16F16_V16F16_V8F16 : VOPProfile <[v8f16, v16f16, v16f16, v8f16]>;
def VOP_V8I16_V16I16_V16I16_V8I16 : VOPProfile <[v8i16, v16i16, v16i16, v8i16]>;
def VOP_V4I32_V4I32_V4I32_V4I32 : VOPProfile <[v4i32, v4i32, v4i32, v4i32]>;
def VOP_V4I32_V2I32_V2I32_V4I32 : VOPProfile <[v4i32, v2i32, v2i32, v4i32]>;


class WMMAType <bits<2> val> {
  bit hasClamp = val{0};
  bit hasOpsel = val{1};
}

def WMMARegular : WMMAType<0b00>;
def WMMAUIClamp : WMMAType<0b01>;
def WMMAOpSel   : WMMAType<0b10>;

class WMMARegularPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
  GCNPat < (P.DstVT (node
                      (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)),
                      (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)),
                      (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers))
                   )),
           (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, $src2_modifiers, P.Src2VT:$src2))
>;

class WMMAOpSelPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
  GCNPat < (P.DstVT (node
                      (P.Src0VT P.Src0VT:$src0),
                      (P.Src1VT P.Src1VT:$src1),
                      (P.Src2VT P.Src2VT:$src2), (WMMAOpSelVOP3PMods i32:$src2_modifiers)
                   )),
           (P.DstVT (Inst (i32 8), P.Src0VT:$src0, (i32 8), P.Src1VT:$src1, i32:$src2_modifiers, P.Src2VT:$src2))
>;

class WMMAUIClampPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
  GCNPat < (P.DstVT (node
                      (DotIUVOP3PMods i32:$src0_modifiers), (P.Src0VT P.Src0VT:$src0),
                      (DotIUVOP3PMods i32:$src1_modifiers), (P.Src1VT P.Src1VT:$src1),
                      (P.Src2VT P.Src2VT:$src2), (i1 timm:$clamp)
                   )),
           (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, (i32 8), P.Src2VT:$src2, i1:$clamp))
>;

class WMMAOpcodeMapping<Instruction TwoAddr, Instruction ThreeAddr> {
  Instruction Opcode2Addr = TwoAddr;
  Instruction Opcode3Addr = ThreeAddr;
  Predicate WaveSizePredicate;
}

def WMMAOpcode : GenericEnum {
  let FilterClass = "VOP3P_Pseudo";
}

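// The WMMAOpcodeMapping records defined further down pair each tied (_twoaddr)
// WMMA pseudo with its untied (_threeaddr) twin; the two generated tables below
// are keyed on either opcode so the pairing can be looked up in both directions.
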
class WMMAMappingTable : GenericTable {
  let FilterClass = "WMMAOpcodeMapping";
  let CppTypeName = "WMMAOpcodeMappingInfo";
  let Fields = ["Opcode2Addr", "Opcode3Addr"];
  string TypeOf_Opcode2Addr = "WMMAOpcode";
  string TypeOf_Opcode3Addr = "WMMAOpcode";
}

def WMMAOpcode2AddrMappingTable : WMMAMappingTable {
  let PrimaryKey = ["Opcode2Addr"];
  let PrimaryKeyName = "getWMMAMappingInfoFrom2AddrOpcode";
}

def WMMAOpcode3AddrMappingTable : WMMAMappingTable {
  let PrimaryKey = ["Opcode3Addr"];
  let PrimaryKeyName = "getWMMAMappingInfoFrom3AddrOpcode";
}

// The WMMA instruction has extra constraints:
// Matrices A and B cannot overlap with D. C cannot partially overlap with D,
// but it is OK for them to be the same (which is a typical case).
//
// We implement it as follows:
// 1) Map the intrinsic to the pseudo where D is tied to C ($vdst = $src2).
// 2) The twoaddressinstruction pass checks if src2 is live and, if that is the
//    case, converts the default pseudo to the pseudo where src2 is not the
//    same as vdst.
// 3) @earlyclobber on the destination satisfies the constraint during RA.

multiclass WMMAInst<string Suffix, string Instr, VOPProfile P, SDPatternOperator node = null_frag, RegisterOperand _Src01RC64 = VRegSrc_256, WMMAType Type, bit convertibleTo3Addr> {

  defvar WMMAConstraints2Addr = "@earlyclobber $vdst,$vdst = $src2";
  defvar WMMAConstraints3Addr = "@earlyclobber $vdst";

  defvar WMMAProfile = VOPProfileWMMA<P, Suffix, _Src01RC64, Type.hasClamp, Type.hasOpsel>;
  let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
    let Constraints = WMMAConstraints2Addr, isConvertibleToThreeAddress = convertibleTo3Addr in {
      def _twoaddr # Suffix : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
    }
  }
  if convertibleTo3Addr then {
    let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
      let Constraints = WMMAConstraints3Addr, SchedRW = [Write32Bit, Write32Bit] in {
        def _threeaddr # Suffix : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
      }
    }
    def : WMMAOpcodeMapping<!cast<Instruction>(NAME # _twoaddr # Suffix),
                            !cast<Instruction>(NAME # _threeaddr # Suffix)>;
  }

  if !eq(Type, WMMAOpSel) then {
    def : WMMAOpSelPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
  } else if !eq(Type, WMMAUIClamp) then {
    def : WMMAUIClampPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
  } else {
    def : WMMARegularPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
  }
}


let WaveSizePredicate = isWave32 in {
  defm V_WMMA_F32_16X16X16_F16   : WMMAInst<"_w32", "v_wmma_f32_16x16x16_f16",   VOP_V8F32_V16F16_V16F16_V8F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F32_16X16X16_BF16  : WMMAInst<"_w32", "v_wmma_f32_16x16x16_bf16",  VOP_V8F32_V16I16_V16I16_V8F32, int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F16_16X16X16_F16   : WMMAInst<"_w32", "v_wmma_f16_16x16x16_f16",   VOP_V16F16_V16F16_V16F16_V16F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w32", "v_wmma_bf16_16x16x16_bf16", VOP_V16I16_V16I16_V16I16_V16I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_F16_16X16X16_F16_TIED : WMMAInst<"_w32", "v_wmma_f16_16x16x16_f16", VOP_V16F16_V16F16_V16F16_V16F16, int_amdgcn_wmma_f16_16x16x16_f16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_BF16_16X16X16_BF16_TIED : WMMAInst<"_w32", "v_wmma_bf16_16x16x16_bf16", VOP_V16I16_V16I16_V16I16_V16I16, int_amdgcn_wmma_bf16_16x16x16_bf16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_I32_16X16X16_IU8   : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu8",   VOP_V8I32_V4I32_V4I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp, 1>;
  defm V_WMMA_I32_16X16X16_IU4   : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu4",   VOP_V8I32_V2I32_V2I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64,  WMMAUIClamp, 1>;
}

let WaveSizePredicate = isWave64 in {
  defm V_WMMA_F32_16X16X16_F16   : WMMAInst<"_w64", "v_wmma_f32_16x16x16_f16",   VOP_V4F32_V16F16_V16F16_V4F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F32_16X16X16_BF16  : WMMAInst<"_w64", "v_wmma_f32_16x16x16_bf16",  VOP_V4F32_V16I16_V16I16_V4F32, int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F16_16X16X16_F16   : WMMAInst<"_w64", "v_wmma_f16_16x16x16_f16",   VOP_V8F16_V16F16_V16F16_V8F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w64", "v_wmma_bf16_16x16x16_bf16", VOP_V8I16_V16I16_V16I16_V8I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_F16_16X16X16_F16_TIED   : WMMAInst<"_w64", "v_wmma_f16_16x16x16_f16",   VOP_V8F16_V16F16_V16F16_V8F16, int_amdgcn_wmma_f16_16x16x16_f16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_BF16_16X16X16_BF16_TIED : WMMAInst<"_w64", "v_wmma_bf16_16x16x16_bf16", VOP_V8I16_V16I16_V16I16_V8I16, int_amdgcn_wmma_bf16_16x16x16_bf16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_I32_16X16X16_IU8   : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu8",   VOP_V4I32_V4I32_V4I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp, 1>;
  defm V_WMMA_I32_16X16X16_IU4   : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu4",   VOP_V4I32_V2I32_V2I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64,  WMMAUIClamp, 1>;

}

//===----------------------------------------------------------------------===//
// Begin Real Encodings
//===----------------------------------------------------------------------===//

class VOP3P_DPP16<bits<7> op, VOP_DPP_Pseudo ps, int subtarget,
                  string opName = ps.OpName>
    : VOP3P_DPP<op, opName, ps.Pfl, 1>, SIMCInstr<ps.PseudoInstr, subtarget> {
  let hasSideEffects = ps.hasSideEffects;
  let Defs = ps.Defs;
  let SchedRW = ps.SchedRW;
  let Uses = ps.Uses;
  let AssemblerPredicate = HasDPP16;
  let SubtargetPredicate = HasDPP16;
  let OtherPredicates = ps.OtherPredicates;
}

class VOP3P_DPP8_Base<bits<7> op, VOP_Pseudo ps, string opName = ps.OpName>
    : VOP3P_DPP8<op, opName, ps.Pfl> {
  let hasSideEffects = ps.hasSideEffects;
  let Defs = ps.Defs;
  let SchedRW = ps.SchedRW;
  let Uses = ps.Uses;
  let OtherPredicates = ps.OtherPredicates;
}

//===----------------------------------------------------------------------===//
// GFX11, GFX12
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_Base<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                           string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  def Gen.Suffix :
    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
    VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;
}

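// Each VOP3P_Real_* multiclass that follows instantiates the MC-level "Real"
// instruction for a particular generation from the pseudo named by
// backing_ps_name, attaching the GFX11/GFX12 VOP3P encoding (and, for the
// *_dpp/*_dpp8 variants, the corresponding DPP encodings).
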
multiclass VOP3P_Real_with_name<GFXGen Gen, bits<7> op,
                                string backing_ps_name = NAME,
                                string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
  let AsmString = asmName # ps.AsmOperands in
  def Gen.Suffix :
    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
    VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>,
    MnemonicAlias<ps.Mnemonic, asmName>, Requires<[Gen.AssemblerPredicate]>;
}

multiclass VOP3P_Real_dpp<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
  def _dpp#Gen.Suffix
      : VOP3P_DPP16<op, !cast<VOP_DPP_Pseudo>(backing_ps_name #"_dpp"),
                    Gen.Subtarget> {
    let AsmString = asmName #ps.Pfl.AsmVOP3DPP16;
    let DecoderNamespace = "DPP"#Gen.DecoderNamespace;
    let AssemblerPredicate = Gen.AssemblerPredicate;
  }
}

multiclass VOP3P_Real_dpp8<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                           string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
  def _dpp8#Gen.Suffix : VOP3P_DPP8_Base<op, ps> {
    let AsmString = asmName #ps.Pfl.AsmVOP3DPP8;
    let DecoderNamespace = "DPP8"#Gen.DecoderNamespace;
    let AssemblerPredicate = Gen.AssemblerPredicate;
  }
}

multiclass VOP3P_Realtriple<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                            string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic>
    : VOP3P_Real_Base<Gen, op, backing_ps_name, asmName>,
      VOP3P_Real_dpp<Gen, op, backing_ps_name, asmName>,
      VOP3P_Real_dpp8<Gen, op, backing_ps_name, asmName>;

//===----------------------------------------------------------------------===//
// GFX12
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_gfx12<bits<7> op> : VOP3P_Real_Base<GFX12Gen, op>;

multiclass VOP3P_Real_with_name_gfx12<bits<7> op,
                                      string backing_ps_name = NAME,
                                      string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> :
  VOP3P_Real_with_name<GFX12Gen, op, backing_ps_name, asmName>;

defm V_PK_MIN_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1b, "V_PK_MIN_F16", "v_pk_min_num_f16">;
defm V_PK_MAX_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1c, "V_PK_MAX_F16", "v_pk_max_num_f16">;

defm V_PK_MINIMUM_F16 : VOP3P_Real_gfx12<0x1d>;
defm V_PK_MAXIMUM_F16 : VOP3P_Real_gfx12<0x1e>;

//===----------------------------------------------------------------------===//
// GFX11
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_gfx11_gfx12<bits<7> op> :
  VOP3P_Real_Base<GFX11Gen, op>, VOP3P_Real_Base<GFX12Gen, op>;

defm V_DOT4_I32_IU8  : VOP3P_Real_gfx11_gfx12<0x16>;
defm V_DOT8_I32_IU4  : VOP3P_Real_gfx11_gfx12<0x18>;
defm V_DOT2_F32_BF16 : VOP3P_Real_gfx11_gfx12<0x1a>;

multiclass VOP3P_Real_WMMA <bits<7> op> {
  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX11" in {
    defm _twoaddr_w32 : VOP3P_Real_Base <GFX11Gen, op>;
  }
  let WaveSizePredicate = isWave64, DecoderNamespace = "WMMAGFX11" in {
    defm _twoaddr_w64 : VOP3P_Real_Base <GFX11Gen, op>;
  }
}

defm V_WMMA_F32_16X16X16_F16   : VOP3P_Real_WMMA <0x040>;
defm V_WMMA_F32_16X16X16_BF16  : VOP3P_Real_WMMA <0x041>;
defm V_WMMA_F16_16X16X16_F16   : VOP3P_Real_WMMA <0x042>;
defm V_WMMA_BF16_16X16X16_BF16 : VOP3P_Real_WMMA <0x043>;
defm V_WMMA_I32_16X16X16_IU8   : VOP3P_Real_WMMA <0x044>;
defm V_WMMA_I32_16X16X16_IU4   : VOP3P_Real_WMMA <0x045>;

//===----------------------------------------------------------------------===//
// GFX8 (VI)
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_vi<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicate = HasVOP3PInsts;
    let DecoderNamespace = "GFX8";
    let VOP3P = 1;
  }
}

multiclass VOP3P_Real_MAI<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
    let Inst{14} = ?; // op_sel_hi(2)
    let Inst{59} = ?; // op_sel_hi(0)
    let Inst{60} = ?; // op_sel_hi(1)
  }
}

let Constraints = "" in {
multiclass VOP3P_Real_MFMA_gfx90a<bits<7> op> {
  let SubtargetPredicate = isGFX90AOnly,
      AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A" in {
  def _gfx90a_acd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX90A>,
             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, 1>;

  def _gfx90a_vcd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64"), SIEncodingFamily.GFX90A>,
             VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64").Pfl, 0>;
  } // End AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A"
}
}

multiclass VOP3P_Real_MFMA_gfx940_aliases<string NameFrom, string NameTo, string Op,
                                          VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(Op # "_e64"),
                                          VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(Op # "_vgprcd" # "_e64"),
                                          VOPProfile Pfl_ACD = PS_ACD.Pfl,
                                          VOPProfile Pfl_VCD = PS_VCD.Pfl> {
  if !ne(NameFrom, NameTo) then {
    def : InstAlias <NameTo # " " # PS_ACD.AsmOperands,
                     (!cast<VOP3P_Real>(Op # "_gfx940_acd") Pfl_ACD.DstRC:$vdst,
                      Pfl_ACD.Src0RC64:$src0, Pfl_ACD.Src1RC64:$src1, Pfl_ACD.Src2RC64:$src2,
                      cbsz:$cbsz, abid:$abid, blgp:$blgp)>, PredicateControl;
    def : InstAlias <NameTo # " " # PS_VCD.AsmOperands,
                     (!cast<VOP3P_Real>(Op # "_gfx940_vcd") Pfl_VCD.DstRC:$vdst,
                      Pfl_VCD.Src0RC64:$src0, Pfl_VCD.Src1RC64:$src1, Pfl_VCD.Src2RC64:$src2,
                      cbsz:$cbsz, abid:$abid, blgp:$blgp)>, PredicateControl;
  }
}

multiclass VOP3P_Real_MFMA_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
  let SubtargetPredicate = isGFX940Plus,
      DecoderNamespace = "GFX940",
      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
             VOP3Pe_MAI <op, PS_ACD.Pfl, 1>;

  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
             VOP3Pe_MAI <op, PS_VCD.Pfl, 0>;
  } // End AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940"

  let SubtargetPredicate = isGFX940Plus in {
    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, PS_ACD.Mnemonic, NAME>;

    if !ne(!subst("_1k", "", PS_ACD.Mnemonic), PS_ACD.Mnemonic) then
    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, !subst("_1k", "", PS_ACD.Mnemonic), NAME>;
  }
}

multiclass VOP3P_Real_MFMA_vi<bits<7> op> {

multiclass VOP3P_Real_MFMA_vi<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
    let SubtargetPredicate = isGFX8GFX9NotGFX90A;
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
    let Constraints = "";
  }
}

multiclass VOP3P_Real_MFMA_vi_gfx90a<bits<7> op> :
  VOP3P_Real_MFMA_gfx90a <op>,
  VOP3P_Real_MFMA_vi <op>;

multiclass VOP3P_Real_MFMA<bits<7> op, string GFX940Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> :
  VOP3P_Real_MFMA_vi_gfx90a <op>,
  VOP3P_Real_MFMA_gfx940 <op, GFX940Name>;

multiclass VOP3P_Real_SMFMAC<bits<7> op, string alias> {
  def _gfx940 : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
                VOP3Pe_SMFMAC <op> {
    let AssemblerPredicate = isGFX940Plus;
    let DecoderNamespace = "GFX8";
  }
  def : MnemonicAlias<alias, !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic>;
}

let SubtargetPredicate = isGFX8GFX9 in {
defm V_PK_MAD_I16     : VOP3P_Real_vi <0x00>;
defm V_PK_MUL_LO_U16  : VOP3P_Real_vi <0x01>;
defm V_PK_ADD_I16     : VOP3P_Real_vi <0x02>;
defm V_PK_SUB_I16     : VOP3P_Real_vi <0x03>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x04>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x05>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x06>;
defm V_PK_MAX_I16     : VOP3P_Real_vi <0x07>;
defm V_PK_MIN_I16     : VOP3P_Real_vi <0x08>;
defm V_PK_MAD_U16     : VOP3P_Real_vi <0x09>;

defm V_PK_ADD_U16     : VOP3P_Real_vi <0x0a>;
defm V_PK_SUB_U16     : VOP3P_Real_vi <0x0b>;
defm V_PK_MAX_U16     : VOP3P_Real_vi <0x0c>;
defm V_PK_MIN_U16     : VOP3P_Real_vi <0x0d>;
defm V_PK_FMA_F16     : VOP3P_Real_vi <0x0e>;
defm V_PK_ADD_F16     : VOP3P_Real_vi <0x0f>;
defm V_PK_MUL_F16     : VOP3P_Real_vi <0x10>;
defm V_PK_MIN_F16     : VOP3P_Real_vi <0x11>;
defm V_PK_MAX_F16     : VOP3P_Real_vi <0x12>;

let OtherPredicates = [HasMadMixInsts] in {
defm V_MAD_MIX_F32   : VOP3P_Real_vi <0x20>;
defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x21>;
defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x22>;
}

let OtherPredicates = [HasFmaMixInsts],
    DecoderNamespace = "GFX9_DL" in {
// The mad_mix instructions were renamed and their behaviors changed, but the
// opcodes stayed the same, so the fma_mix variants need a different
// DecoderNamespace to avoid decoder ambiguity.
defm V_FMA_MIX_F32   : VOP3P_Real_vi <0x20>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x21>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x22>;
}

defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x26>;
defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x27>;

defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x23>;
defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x29>;
defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x2b>;

defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x28>;
defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x2a>;
} // End SubtargetPredicate = isGFX8GFX9

let OtherPredicates = [HasMAIInsts] in {

defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x58>;
defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x59>;
defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MFMA <0x40, "v_mfma_f32_32x32x1_2b_f32">;
defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MFMA <0x41, "v_mfma_f32_16x16x1_4b_f32">;
defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MFMA <0x42, "v_mfma_f32_4x4x1_16b_f32">;
defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MFMA <0x44, "v_mfma_f32_32x32x2_f32">;
defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MFMA <0x45, "v_mfma_f32_16x16x4_f32">;
defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MFMA <0x48, "v_mfma_f32_32x32x4_2b_f16">;
defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MFMA <0x49, "v_mfma_f32_16x16x4_4b_f16">;
defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MFMA <0x4a, "v_mfma_f32_4x4x4_16b_f16">;
defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MFMA <0x4c, "v_mfma_f32_32x32x8_f16">;
defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MFMA <0x4d, "v_mfma_f32_16x16x16_f16">;
defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MFMA <0x50, "v_mfma_i32_32x32x4_2b_i8">;
defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MFMA <0x51, "v_mfma_i32_16x16x4_4b_i8">;
defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MFMA <0x52, "v_mfma_i32_4x4x4_16b_i8">;

defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MFMA_vi_gfx90a <0x55>;
defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MFMA_vi_gfx90a <0x54>;
defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x68>;
defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x69>;
defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MFMA_vi_gfx90a <0x6b>;
defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6c>;
defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6d>;

} // End OtherPredicates = [HasMAIInsts]
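
// The _1K bf16 and f64 MFMA variants below only get gfx90a real encodings
// here; their gfx940 encodings (with renamed mnemonics) are defined
// separately further down.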

defm V_MFMA_F32_32X32X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x63>;
defm V_MFMA_F32_16X16X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x64>;
defm V_MFMA_F32_4X4X4BF16_1K    : VOP3P_Real_MFMA_gfx90a <0x65>;
defm V_MFMA_F32_32X32X8BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x66>;
defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx90a <0x67>;
defm V_MFMA_F64_16X16X4F64      : VOP3P_Real_MFMA_gfx90a <0x6e>;
defm V_MFMA_F64_4X4X4F64        : VOP3P_Real_MFMA_gfx90a <0x6f>;

defm V_MFMA_I32_32X32X16I8       : VOP3P_Real_MFMA_gfx940 <0x56, "v_mfma_i32_32x32x16_i8">;
defm V_MFMA_I32_16X16X32I8       : VOP3P_Real_MFMA_gfx940 <0x57, "v_mfma_i32_16x16x32_i8">;
defm V_MFMA_F32_16X16X8XF32      : VOP3P_Real_MFMA_gfx940 <0x3e, "v_mfma_f32_16x16x8_xf32">;
defm V_MFMA_F32_32X32X4XF32      : VOP3P_Real_MFMA_gfx940 <0x3f, "v_mfma_f32_32x32x4_xf32">;
defm V_MFMA_F32_16X16X32_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x70>;
defm V_MFMA_F32_16X16X32_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x71>;
defm V_MFMA_F32_16X16X32_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x72>;
defm V_MFMA_F32_16X16X32_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x73>;
defm V_MFMA_F32_32X32X16_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x74>;
defm V_MFMA_F32_32X32X16_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x75>;
defm V_MFMA_F32_32X32X16_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x76>;
defm V_MFMA_F32_32X32X16_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x77>;

defm V_MFMA_F32_32X32X4BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x5d, "v_mfma_f32_32x32x4_2b_bf16">;
defm V_MFMA_F32_16X16X4BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x5e, "v_mfma_f32_16x16x4_4b_bf16">;
defm V_MFMA_F32_4X4X4BF16_1K    : VOP3P_Real_MFMA_gfx940 <0x5f, "v_mfma_f32_4x4x4_16b_bf16">;
defm V_MFMA_F32_32X32X8BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x60, "v_mfma_f32_32x32x8_bf16">;
defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx940 <0x61, "v_mfma_f32_16x16x16_bf16">;

defm V_MFMA_F64_16X16X4F64      : VOP3P_Real_MFMA_gfx940 <0x6e, "v_mfma_f64_16x16x4_f64">;
defm V_MFMA_F64_4X4X4F64        : VOP3P_Real_MFMA_gfx940 <0x6f, "v_mfma_f64_4x4x4_4b_f64">;

defm V_SMFMAC_F32_16X16X32_F16     : VOP3P_Real_SMFMAC <0x62, "v_smfmac_f32_16x16x32f16">;
defm V_SMFMAC_F32_32X32X16_F16     : VOP3P_Real_SMFMAC <0x64, "v_smfmac_f32_32x32x16f16">;
defm V_SMFMAC_F32_16X16X32_BF16    : VOP3P_Real_SMFMAC <0x66, "v_smfmac_f32_16x16x32bf16">;
defm V_SMFMAC_F32_32X32X16_BF16    : VOP3P_Real_SMFMAC <0x68, "v_smfmac_f32_32x32x16bf16">;
defm V_SMFMAC_I32_16X16X64_I8      : VOP3P_Real_SMFMAC <0x6a, "v_smfmac_i32_16x16x64i8">;
defm V_SMFMAC_I32_32X32X32_I8      : VOP3P_Real_SMFMAC <0x6c, "v_smfmac_i32_32x32x32i8">;
defm V_SMFMAC_F32_16X16X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x78, "v_smfmac_f32_16x16x64bf8bf8">;
defm V_SMFMAC_F32_16X16X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x79, "v_smfmac_f32_16x16x64bf8fp8">;
defm V_SMFMAC_F32_16X16X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x7a, "v_smfmac_f32_16x16x64fp8bf8">;
defm V_SMFMAC_F32_16X16X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x7b, "v_smfmac_f32_16x16x64fp8fp8">;
defm V_SMFMAC_F32_32X32X32_BF8_BF8 : VOP3P_Real_SMFMAC <0x7c, "v_smfmac_f32_32x32x32bf8bf8">;
defm V_SMFMAC_F32_32X32X32_BF8_FP8 : VOP3P_Real_SMFMAC <0x7d, "v_smfmac_f32_32x32x32bf8fp8">;
defm V_SMFMAC_F32_32X32X32_FP8_BF8 : VOP3P_Real_SMFMAC <0x7e, "v_smfmac_f32_32x32x32fp8bf8">;
defm V_SMFMAC_F32_32X32X32_FP8_FP8 : VOP3P_Real_SMFMAC <0x7f, "v_smfmac_f32_32x32x32fp8fp8">;
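
// Each VOP3P_Real_SMFMAC use above also registers a MnemonicAlias, so the
// compact spelling passed as the second template argument (e.g.
// "v_smfmac_f32_16x16x32f16") assembles to the same instruction as the
// pseudo's canonical mnemonic (e.g. "v_smfmac_f32_16x16x32_f16").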

defm V_PK_FMA_F32 : VOP3P_Real_vi <0x30>;
defm V_PK_MUL_F32 : VOP3P_Real_vi <0x31>;
defm V_PK_ADD_F32 : VOP3P_Real_vi <0x32>;
defm V_PK_MOV_B32 : VOP3P_Real_vi <0x33>;

//===----------------------------------------------------------------------===//
// GFX10
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1 in {
  multiclass VOP3P_Real_gfx10<bits<7> op> {
    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
  }
} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1

multiclass VOP3P_Real_gfx10_gfx11<bits<7> op> :
  VOP3P_Real_gfx10<op>, VOP3P_Real_Base<GFX11Gen, op>;

multiclass VOP3P_Real_gfx10_gfx11_gfx12<bits<7> op> :
  VOP3P_Real_gfx10_gfx11<op>, VOP3P_Real_Base<GFX12Gen, op>;

multiclass VOP3P_Real_gfx10_gfx11_gfx12_Triple<bits<7> op> :
  VOP3P_Real_gfx10<op>, VOP3P_Realtriple<GFX11Gen, op>,
  VOP3P_Realtriple<GFX12Gen, op>;

defm V_PK_MAD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x00>;
defm V_PK_MUL_LO_U16  : VOP3P_Real_gfx10_gfx11_gfx12<0x01>;
defm V_PK_ADD_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x02>;
defm V_PK_SUB_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x03>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x04>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x05>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x06>;
defm V_PK_MAX_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x07>;
defm V_PK_MIN_I16     : VOP3P_Real_gfx10_gfx11_gfx12<0x08>;
defm V_PK_MAD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x09>;
defm V_PK_ADD_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0a>;
defm V_PK_SUB_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0b>;
defm V_PK_MAX_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0c>;
defm V_PK_MIN_U16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0d>;
defm V_PK_FMA_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0e>;
defm V_PK_ADD_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x0f>;
defm V_PK_MUL_F16     : VOP3P_Real_gfx10_gfx11_gfx12<0x10>;
defm V_PK_MIN_F16     : VOP3P_Real_gfx10_gfx11<0x11>;
defm V_PK_MAX_F16     : VOP3P_Real_gfx10_gfx11<0x12>;
defm V_FMA_MIX_F32    : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x20>;
defm V_FMA_MIXLO_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x21>;
defm V_FMA_MIXHI_F16  : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x22>;

defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x14>;
defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x15>;

defm V_DOT2_F32_F16 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x13>;
defm V_DOT4_U32_U8  : VOP3P_Real_gfx10_gfx11_gfx12<0x17>;
defm V_DOT8_U32_U4  : VOP3P_Real_gfx10_gfx11_gfx12<0x19>;

defm V_DOT4_I32_I8  : VOP3P_Real_gfx10 <0x16>;
defm V_DOT8_I32_I4  : VOP3P_Real_gfx10 <0x18>;
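
// Usage sketch (illustrative only): a packed pseudo defined with VOP3PInst
// earlier in this file would get its gfx10/gfx11/gfx12 real encodings with a
// single line using the helpers above, e.g.
//   defm V_PK_EXAMPLE_U16 : VOP3P_Real_gfx10_gfx11_gfx12<0x1f>;
// where V_PK_EXAMPLE_U16 and opcode 0x1f are placeholders, not allocated
// names or encodings.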