//===-- VOP3PInstructions.td - Vector Instruction Definitions -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP3P Classes
//===----------------------------------------------------------------------===//

// Base profile for packed (VOP3P-encoded) instructions. HasDPP additionally
// enables the extended VOP3 DPP form for profiles that support it.
class VOP3P_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
                    bit HasDPP = 0> : VOP3_Profile<P, Features> {
  let IsVOP3P = 1;
  let HasExtVOP3DPP = HasDPP;
  // We do not want to print src modifiers for vop3p because the bits are
  // overloaded in meaning and the logic in printOperandAndFPInputMods is
  // wrong for vop3p
  let AsmVOP3Base = AsmVOP3P;
}

// Used for FMA_MIX* and MAD_MIX* insts
// Their operands are only sort of f16 operands. Depending on
// op_sel_hi, these may be interpreted as f32. The inline immediate
// values are really f16 converted to f32, so we treat these as f16
// operands.
class VOP3P_Mix_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
                        bit useTiedOutput = 0> : VOP3P_Profile<P, Features, 1> {
  // Set for the *MIXLO/*MIXHI variants, which write only half of the
  // destination register and therefore read the other half via $vdst_in.
  bit UseTiedOutput = useTiedOutput;

  // Regular (non-DPP) source operands: three f16 sources with FP modifiers.
  dag srcs =
      (ins FP16InputMods:$src0_modifiers, VCSrc_f16:$src0,
           FP16InputMods:$src1_modifiers, VCSrc_f16:$src1,
           FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);
  // DPP form: src0/src1 must come from VGPRs.
  dag dpp_srcs =
      (ins FPVRegInputMods:$src0_modifiers, VGPRSrc_32:$src0,
           FPVRegInputMods:$src1_modifiers, VRegSrc_32:$src1,
           FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);

  // FIXME: Clamp0 misbehaves with the non-default vdst_in
  // following it. For now workaround this by requiring clamp
  // in tied patterns. This should use undef_tied_input, but it
  // seems underdeveloped and doesn't apply the right register
  // class constraints.
  dag mods = !con(!if(UseTiedOutput, (ins Clamp:$clamp, VGPR_32:$vdst_in),
                      (ins Clamp0:$clamp)),
                  (ins op_sel0:$op_sel, op_sel_hi0:$op_sel_hi));
  // We use Ins64 because that is the one which populates InOperandList
  // due to the logic in class VOP3_Pseudo
  let Ins64 = !con(srcs, mods);
  let InsVOP3Base = !con(dpp_srcs, mods);
  let AsmVOP3Base =
    "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
}

// Defines a packed VOP3P pseudo-instruction and, when the profile enables
// extended DPP, a GFX11+ _dpp pseudo alongside it.
multiclass VOP3PInst<string OpName, VOPProfile P,
                     SDPatternOperator node = null_frag, bit IsDOT = 0> {
  def NAME : VOP3P_Pseudo<OpName, P,
                          !if (P.HasModifiers,
                               getVOP3PModPat<P, node, IsDOT, IsDOT>.ret,
                               getVOP3Pat<P, node>.ret)>;
  let SubtargetPredicate = isGFX11Plus in {
    if P.HasExtVOP3DPP then
      def _dpp : VOP3_DPP_Pseudo<OpName, P> {
        let VOP3P = 1;
        let PseudoInstr = OpName #"_dpp";
      }
  } // end SubtargetPredicate = isGFX11Plus
}

// Non-packed instructions that use the VOP3P encoding.
// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
multiclass VOP3_VOP3PInst<string OpName, VOP3P_Mix_Profile P> {
  def NAME : VOP3P_Pseudo<OpName, P> {
    // Tied-output profiles (the mixlo/mixhi halves) read the untouched half
    // of the destination through $vdst_in; it is not encoded.
    let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
    let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
  }
  let SubtargetPredicate = isGFX11Plus in {
    if P.HasExtVOP3DPP then
      def _dpp : VOP3_DPP_Pseudo<OpName, P> {
        let VOP3P = 1;
        let PseudoInstr = OpName#"_dpp";
        let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
        let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
      }
  } // end SubtargetPredicate = isGFX11Plus
}

let isReMaterializable = 1 in {
let isCommutable = 1 in {
defm V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
defm V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>, imad>;

let FPDPRounding = 1 in {
defm V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, any_fma>;
defm V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fadd>;
defm V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fmul>;
} // End FPDPRounding = 1
defm V_PK_MAX_F16 : VOP3PInst<"v_pk_max_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fmaxnum_like>;
defm V_PK_MIN_F16 : VOP3PInst<"v_pk_min_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fminnum_like>;

defm V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, add>;
defm V_PK_ADD_I16 : VOP3PInst<"v_pk_add_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
defm V_PK_MUL_LO_U16 : VOP3PInst<"v_pk_mul_lo_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, mul>;

defm V_PK_MIN_I16 : VOP3PInst<"v_pk_min_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smin>;
defm V_PK_MIN_U16 : VOP3PInst<"v_pk_min_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umin>;
defm V_PK_MAX_I16 : VOP3PInst<"v_pk_max_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smax>;
defm V_PK_MAX_U16 : VOP3PInst<"v_pk_max_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umax>;

let SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 in {
defm V_PK_MAXIMUM_F16 : VOP3PInst<"v_pk_maximum_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16, VOP3_PACKED>, fmaximum>;
defm V_PK_MINIMUM_F16 : VOP3PInst<"v_pk_minimum_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16, VOP3_PACKED>, fminimum>;
} // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0
}

defm V_PK_SUB_U16 : VOP3PInst<"v_pk_sub_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
defm V_PK_SUB_I16 : VOP3PInst<"v_pk_sub_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, sub>;

defm V_PK_LSHLREV_B16 : VOP3PInst<"v_pk_lshlrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshl_rev_16>;
defm V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, cashr_rev_16>;
defm V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshr_rev_16>;
} // End isReMaterializable = 1

let SubtargetPredicate = HasVOP3PInsts in {

// Integer operations with clamp bit set.
class VOP3PSatPat<SDPatternOperator pat, Instruction inst> : GCNPat<
  (pat (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)),
       (v2i16 (VOP3PMods v2i16:$src1, i32:$src1_modifiers))),
  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE)
>;

// Saturating packed i16 add/sub select to the clamped forms of the
// corresponding V_PK instructions.
def : VOP3PSatPat<uaddsat, V_PK_ADD_U16>;
def : VOP3PSatPat<saddsat, V_PK_ADD_I16>;
def : VOP3PSatPat<usubsat, V_PK_SUB_U16>;
def : VOP3PSatPat<ssubsat, V_PK_SUB_I16>;
} // End SubtargetPredicate = HasVOP3PInsts

// TODO: Make sure we're doing the right thing with denormals. Note
// that FMA and MAD will differ.
// Selection patterns mapping fma/mad dags whose f16 operands are extended to
// f32 onto the MAD_MIX*/FMA_MIX* instructions.
multiclass MadFmaMixPats<SDPatternOperator fma_like,
                         Instruction mix_inst,
                         Instruction mixlo_inst,
                         Instruction mixhi_inst> {
  // At least one of the operands needs to be an fpextend of an f16
  // for this to be worthwhile, so we need three patterns here.
  // TODO: Could we use a predicate to inspect src1/2/3 instead?
  def : GCNPat <
    (f32 (fma_like (f32 (VOP3PMadMixModsExt f16:$src0, i32:$src0_mods)),
                   (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_mods)),
                   (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_mods)))),
    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
              DSTCLAMP.NONE)>;
  def : GCNPat <
    (f32 (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_mods)),
                   (f32 (VOP3PMadMixModsExt f16:$src1, i32:$src1_mods)),
                   (f32 (VOP3PMadMixMods f32:$src2, i32:$src2_mods)))),
    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
              DSTCLAMP.NONE)>;
  def : GCNPat <
    (f32 (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_mods)),
                   (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_mods)),
                   (f32 (VOP3PMadMixModsExt f16:$src2, i32:$src2_mods)))),
    (mix_inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2,
              DSTCLAMP.NONE)>;

  // fpround of the f32 result writes the low half; the high half is left
  // undefined (IMPLICIT_DEF feeds the tied input).
  def : GCNPat <
    (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                $src2_modifiers, $src2,
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  // FIXME: Special case handling for maxhi (especially for clamp)
  // because dealing with the write to high half of the register is
  // difficult.
  def : GCNPat <
    (build_vector f16:$elt0, (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                                     (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                                     (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.NONE,
                       VGPR_32:$elt0))
  >;

  // As above but with a clamp on the high-half result.
  def : GCNPat <
    (build_vector
      f16:$elt0,
      (AMDGPUclamp (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                           (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                           (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.ENABLE,
                       VGPR_32:$elt0))
  >;

  // Clamped v2f16 built from two independent mixes: mixlo produces the low
  // half, and its result is fed as the tied input of mixhi.
  def : GCNPat <
    (AMDGPUclamp (build_vector
      (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$lo_src0, i32:$lo_src0_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$lo_src1, i32:$lo_src1_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$lo_src2, i32:$lo_src2_modifiers))))),
      (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$hi_src0, i32:$hi_src0_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$hi_src1, i32:$hi_src1_modifiers)),
                              (f32 (VOP3PMadMixMods f16:$hi_src2, i32:$hi_src2_modifiers))))))),
    (v2f16 (mixhi_inst $hi_src0_modifiers, $hi_src0,
                       $hi_src1_modifiers, $hi_src1,
                       $hi_src2_modifiers, $hi_src2,
                       DSTCLAMP.ENABLE,
                       (mixlo_inst $lo_src0_modifiers, $lo_src0,
                                   $lo_src1_modifiers, $lo_src1,
                                   $lo_src2_modifiers, $lo_src2,
                                   DSTCLAMP.ENABLE,
                                   (i32 (IMPLICIT_DEF)))))
  >;

  // f32 multiply rounded to f16: select to mixlo with a zero addend
  // (src2 modifiers and operand both forced to 0).
  def : GCNPat <
    (f16 (fpround (fmul (f32 (VOP3PMadMixMods f32:$src0, i32:$src0_modifiers)),
                        (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                (i32 0), (i32 0),
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  def : GCNPat <
    (build_vector f16:$elt0, (f16 (fpround (fmul (f32 (VOP3PMadMixMods f32:$src0, i32:$src0_modifiers)),
                                                 (f32 (VOP3PMadMixMods f32:$src1, i32:$src1_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       (i32 0), (i32 0),
                       DSTCLAMP.NONE,
                       VGPR_32:$elt0))
  >;
}

let SubtargetPredicate = HasMadMixInsts, OtherPredicates = [NoFP32Denormals] in {

// These are VOP3a-like opcodes which accept no omod.
// Size of src arguments (16/32) is controlled by op_sel.
// For 16-bit src arguments their location (hi/lo) are controlled by op_sel_hi.
let isCommutable = 1, mayRaiseFPException = 0 in {
let isReMaterializable = 1 in
defm V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
defm V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;

let ClampLo = 0, ClampHi = 1 in {
defm V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
}
} // End FPDPRounding = 1
}

defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
} // End SubtargetPredicate = HasMadMixInsts, OtherPredicates = [NoFP32Denormals]


// Essentially the same as the mad_mix versions
let SubtargetPredicate = HasFmaMixInsts in {
let isCommutable = 1 in {

let isReMaterializable = 1 in
defm V_FMA_MIX_F32 : VOP3_VOP3PInst<"v_fma_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
defm V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;

let ClampLo = 0, ClampHi = 1 in {
defm V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_fma_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
}
} // End FPDPRounding = 1
}

defm : MadFmaMixPats<fma, V_FMA_MIX_F32, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
}

// Defines patterns that extract signed 4bit from each Idx[0].
foreach Idx = [[0,28],[4,24],[8,20],[12,16],[16,12],[20,8],[24,4]] in
  def ExtractSigned4bit_#Idx[0] : PatFrag<(ops node:$src),
                                          (sra (shl node:$src, (i32 Idx[1])), (i32 28))>;

// Defines code pattern that extracts U(unsigned/signed) 4/8bit from FromBitIndex.
class Extract<int FromBitIndex, int BitMask, bit U>: PatFrag<
  (ops node:$src),
  !if (!or (!and (!eq (BitMask, 255), !eq (FromBitIndex, 24)), !eq (FromBitIndex, 28)), // last element
       !if (U, (srl node:$src, (i32 FromBitIndex)), (sra node:$src, (i32 FromBitIndex))),
       !if (!eq (FromBitIndex, 0), // first element
            !if (U, (and node:$src, (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                      (sext_inreg node:$src, i8))),
            !if (U, (and (srl node:$src, (i32 FromBitIndex)), (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                      (sext_inreg (srl node:$src, (i32 FromBitIndex)), i8)))))>;


foreach Type = ["I", "U"] in
  foreach Index = 0-3 in {
    // Defines patterns that extract each Index'ed 8bit from an unsigned
    // 32bit scalar value;
    def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !eq (Type, "U")>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 8bit of a 32bit scalar value.

    def Mul#Type#_Elt#Index : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), AMDGPUmul_i24_oneuse, AMDGPUmul_u24_oneuse))
                             (!cast<Extract>(Type#Index#"_8bit") node:$src0),
                             (!cast<Extract>(Type#Index#"_8bit") node:$src1))>;
  }

// Different variants of dot8 patterns cause a huge increase in the compile time.
// Define non-associative/commutative add/mul to prevent permutation in the dot8
// pattern.
def NonACAdd : SDNode<"ISD::ADD" , SDTIntBinOp>;
def NonACAdd_oneuse : HasOneUseBinOp<NonACAdd>;

def NonACAMDGPUmul_u24 : SDNode<"AMDGPUISD::MUL_U24" , SDTIntBinOp>;
def NonACAMDGPUmul_u24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_u24>;

def NonACAMDGPUmul_i24 : SDNode<"AMDGPUISD::MUL_I24" , SDTIntBinOp>;
def NonACAMDGPUmul_i24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_i24>;

foreach Type = ["I", "U"] in
  foreach Index = 0-7 in {
    // Defines patterns that extract each Index'ed 4bit from an unsigned
    // 32bit scalar value;
    def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !eq (Type, "U")>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 4bit of a 32bit scalar value.
    def Mul#Type#Index#"_4bit" : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), NonACAMDGPUmul_i24_oneuse, NonACAMDGPUmul_u24_oneuse))
                             (!cast<Extract>(Type#Index#"_4bit") node:$src0),
                             (!cast<Extract>(Type#Index#"_4bit") node:$src1))>;
  }

// Matches an expanded 2-element unsigned dot product (hi*hi + src2 + lo*lo)
// onto a dot2 instruction with both op_sel words set to 8 and clamp off.
class UDot2Pat<VOP_Pseudo Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
                                         (srl i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
                             (and i32:$src1, (i32 65535)))
   ),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let Predicates = Inst.Predicates;
}

// Signed counterpart of UDot2Pat (arithmetic shifts / sign extension).
class SDot2Pat<VOP_Pseudo Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
                                         (sra i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
                             (sext_inreg i32:$src1, i16))),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let Predicates = Inst.Predicates;
}

let IsDOT = 1 in {
let OtherPredicates = [HasDot2Insts] in {
defm V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_sdot2, 1>;
defm V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16",
  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_udot2, 1>;
} // End OtherPredicates = [HasDot2Insts]

let OtherPredicates = [HasDot10Insts] in
defm V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
  VOP3P_Profile<VOP_F32_V2F16_V2F16_F32, VOP3_REGULAR, /*HasDPP*/ 1>,
  AMDGPUfdot2, 1/*ExplicitClamp*/>;

let OtherPredicates = [HasDot7Insts] in {
let IsInvalidSingleUseConsumer = 1 in {
  defm V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8",
    VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
}
defm V_DOT8_U32_U4 : VOP3PInst<"v_dot8_u32_u4",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;
} // End OtherPredicates = [HasDot7Insts]

let OtherPredicates = [HasDot1Insts] in {
let IsInvalidSingleUseConsumer = 1 in {
  defm V_DOT4_I32_I8 : VOP3PInst<"v_dot4_i32_i8",
    VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
}
defm V_DOT8_I32_I4 : VOP3PInst<"v_dot8_i32_i4",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;
} // End OtherPredicates = [HasDot1Insts]

// bf16 dot2 additionally allows src1 modifiers.
def DOT2_BF16_Profile
  : VOP3P_Profile<VOP_F32_V2BF16_V2BF16_F32, VOP3_REGULAR, /*HasDPP*/ 1> {
  let HasSrc1Mods = 1;
}

let SubtargetPredicate = HasDot9Insts in {

defm V_DOT2_F32_BF16 : VOP3PInst<"v_dot2_f32_bf16", DOT2_BF16_Profile,
                                 int_amdgcn_fdot2_f32_bf16, 1>;

} // End SubtargetPredicate = HasDot9Insts

} // End let IsDOT = 1

multiclass VOP3PDOTIUInst <string OpName, SDPatternOperator intrinsic_node> {
  let IsDOT = 1 in
  defm NAME : VOP3PInst<OpName, VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>,
                        null_frag, 1>;
  // Dot-iu instructions consider input as signed if imod neg bits are set. Thus
  // Dot-iu Intrinsics have extra operands and require separate codegen pattern.
  def : GCNPat < (intrinsic_node (VOP3PModsNeg i32:$src0_mods), i32:$src0,
                                 (VOP3PModsNeg i32:$src1_mods), i32:$src1,
                                 i32:$src2, (i1 timm:$clamp)),
                 (!cast<Instruction>(NAME) $src0_mods, i32:$src0,
                                           $src1_mods, i32:$src1,
                                           (i32 8), i32:$src2, i1:$clamp)
  >;
}

let SubtargetPredicate = HasDot8Insts in {
defm V_DOT4_I32_IU8 : VOP3PDOTIUInst<"v_dot4_i32_iu8", int_amdgcn_sudot4>;
defm V_DOT8_I32_IU4 : VOP3PDOTIUInst<"v_dot8_i32_iu4", int_amdgcn_sudot8>;

// Select the plain signed-dot intrinsics onto the IU instructions with both
// neg (signed) source modifier words set to 9.
def : GCNPat < (int_amdgcn_sdot8 i32:$src0,
                                 i32:$src1,
                                 i32:$src2, (i1 timm:$clamp)),
               (V_DOT8_I32_IU4 (i32 9), i32:$src0,
                               (i32 9), i32:$src1, (i32 8), i32:$src2, i1:$clamp)
>;

def : GCNPat < (int_amdgcn_sdot4 i32:$src0,
                                 i32:$src1,
                                 i32:$src2, (i1 timm:$clamp)),
               (V_DOT4_I32_IU8 (i32 9), i32:$src0,
                               (i32 9), i32:$src1, (i32 8), i32:$src2, i1:$clamp)
>;
} // End SubtargetPredicate = HasDot8Insts

// Does not use opsel, no src_modifiers on src0 and src1.
// src_modifiers on src2(f32) are f32 fneg(neg_lo[2]) and f32 fabs(neg_hi[2]).
def VOP3P_DOTF8_Profile : VOP3P_Profile<VOPProfile <[f32, i32, i32, f32]>,
                                        VOP3_PACKED, 1> {
  let HasClamp = 0;
  let HasOpSel = 0;
  let HasOMod = 0;
  let IsDOT = 1;
  let HasSrc0Mods = 0;
  let HasSrc1Mods = 0;
  let HasSrc2Mods = 1;

  let InsVOP3P = (ins VSrc_b32:$src0, VSrc_b32:$src1,
                      PackedF16InputMods:$src2_modifiers, VSrc_f32:$src2,
                      neg_lo0:$neg_lo, neg_hi0:$neg_hi);

  let InsVOP3DPP8 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1,
                         PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2,
                         neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp8:$dpp8, Dpp8FI:$fi);

  let InsVOP3DPP16 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1,
                          PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2,
                          neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp_ctrl:$dpp_ctrl,
                          DppRowMask:$row_mask, DppBankMask:$bank_mask,
                          DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi);
}

// fp8/bf8 dot4 instruction plus the GFX12+ pattern matching its intrinsic
// (only src2 carries modifiers; see VOP3P_DOTF8_Profile).
multiclass VOP3PDOTF8Inst <string OpName, SDPatternOperator intrinsic_node> {
  defm NAME : VOP3PInst<OpName, VOP3P_DOTF8_Profile, null_frag, 1>;

  let SubtargetPredicate = isGFX12Plus in
  def : GCNPat <(intrinsic_node i32:$src0, i32:$src1,
                                (VOP3Mods f32:$src2, i32:$src2_modifiers)),
                (!cast<Instruction>(NAME) i32:$src0, i32:$src1,
                                          i32:$src2_modifiers, f32:$src2)>;
}

let OtherPredicates = [HasDot11Insts] in {
defm V_DOT4_F32_FP8_BF8 : VOP3PDOTF8Inst<"v_dot4_f32_fp8_bf8", int_amdgcn_dot4_f32_fp8_bf8>;
defm V_DOT4_F32_BF8_FP8 : VOP3PDOTF8Inst<"v_dot4_f32_bf8_fp8", int_amdgcn_dot4_f32_bf8_fp8>;
defm V_DOT4_F32_FP8_FP8 : VOP3PDOTF8Inst<"v_dot4_f32_fp8_fp8", int_amdgcn_dot4_f32_fp8_fp8>;
defm V_DOT4_F32_BF8_BF8 : VOP3PDOTF8Inst<"v_dot4_f32_bf8_bf8", int_amdgcn_dot4_f32_bf8_bf8>;
}

def : UDot2Pat<V_DOT2_U32_U16>;
def : SDot2Pat<V_DOT2_I32_I16>;

// Match a fully expanded 4-element byte dot product onto V_DOT4.
foreach Type = ["U", "I"] in
  let Predicates = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).Predicates in
  def : GCNPat <
    !cast<dag>(!foldl((i32 i32:$src2), [0, 1, 2, 3], lhs, y,
                      (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// Match a fully expanded 8-element nibble dot product onto V_DOT8.
foreach Type = ["U", "I"] in
  let Predicates = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).Predicates in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [1, 2, 3, 4, 5, 6, 7], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// Different variants of dot8 code-gen dag patterns are not generated through table-gen due to a huge increase
// in the compile time. Directly handle the pattern generated by the FE here.
foreach Type = ["U", "I"] in
  let Predicates = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).Predicates in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [7, 1, 2, 3, 4, 5, 6], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// Destination operand classes over AGPR tuples (ADst_*) and VGPR tuples
// (VDst_*) used by the MAI/MFMA profiles below.
def ADst_32 : VOPDstOperand<AGPR_32>;
def ADst_64 : VOPDstOperand<AReg_64>;
def ADst_128 : VOPDstOperand<AReg_128>;
def ADst_256 : VOPDstOperand<AReg_256>;
def ADst_512 : VOPDstOperand<AReg_512>;
def ADst_1024 : VOPDstOperand<AReg_1024>;
def VDst_64 : VOPDstOperand<VReg_64>;
def VDst_128 : VOPDstOperand<VReg_128>;
def VDst_256 : VOPDstOperand<VReg_256>;
def VDst_512 : VOPDstOperand<VReg_512>;
def VDst_1024 : VOPDstOperand<VReg_1024>;

// v_accvgpr_read_b32: AGPR source, default (VGPR) destination.
def VOPProfileAccRead : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> {
  let Src0RC64 = ARegSrc_32;
}

// v_accvgpr_write_b32: VGPR/constant source, AGPR destination.
def VOPProfileAccWrite : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> {
  let DstRC = ADst_32;
  let Src0RC64 = VCSrc_b32;
}

class VOPProfileMAI<VOPProfile P, RegisterOperand _SrcRC, RegisterOperand _DstRC,
                    RegisterOperand SrcABRC = AVSrc_32>
  : VOP3P_Profile<P, VOP3_MAI> {
  let DstRC = _DstRC;
  let Src0RC64 = SrcABRC;
  let Src1RC64 = SrcABRC;
  let Src2RC64 = _SrcRC;
  let HasOpSel = 0;
  let HasClamp = 0;
  let HasIntClamp = 0;
  let HasOMod = 0;
  let HasModifiers = 0;
  let AsmVOP3Base = "$vdst, $src0, $src1, $src2$cbsz$abid$blgp";
  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, CBSZ:$cbsz, ABID:$abid, blgp:$blgp);
  let InsVOP3Base = Ins64;
  // Dst and SrcC cannot partially overlap if SrcC/Dst is bigger than 4 VGPRs.
  // We then create two versions of the instruction: with tied dst and src2
  // and with the earlyclobber flag on the dst. This is stricter than the
  // actual HW restriction. In particular earlyclobber also affects src0 and
  // src1 allocation which is not required.
  bit NoDstOverlap = !gt(DstVT.Size, 128);
}

// Sparse MFMA (SMFMAC): src2 becomes the accumulator tied to the destination
// type, and an extra $idx VGPR operand carries the sparsity index.
class VOPProfileSMFMAC<VOPProfile P, RegisterOperand _DstRC,
                       RegisterOperand _SrcARC, RegisterOperand _SrcBRC>
  : VOPProfileMAI<P, _DstRC, _DstRC, _SrcARC> {
  let Src1RC64 = _SrcBRC;
  let Src2VT = DstVT;
  let Asm64 = " $vdst, $src0, $src1, $idx$cbsz$abid";
  let Outs64 = (outs DstRC:$vdst);
  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, VRegSrc_32:$idx, CBSZ:$cbsz, ABID:$abid, Src2RC64:$src2);
}

// AGPR-destination MFMA profiles.
def VOPProfileMAI_F32_F32_X4 : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32, AISrc_128_f32, ADst_128>;
def VOPProfileMAI_F32_F32_X16 : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32, AISrc_512_f32, ADst_512>;
def VOPProfileMAI_F32_F32_X32 : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32, AISrc_1024_f32, ADst_1024>;
def VOPProfileMAI_I32_I32_X4 : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32, AISrc_128_b32, ADst_128>;
def VOPProfileMAI_I32_I32_X16 : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32, AISrc_512_b32, ADst_512>;
def VOPProfileMAI_I32_I32_X32 : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32, AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V2I16_X4 : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32, AISrc_128_b32, ADst_128>;
def VOPProfileMAI_F32_V2I16_X16 : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, AISrc_512_b32, ADst_512>;
def VOPProfileMAI_F32_V2I16_X32 : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V4F16_X4 : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X16 : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X32 : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X4 : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X16 : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X32 : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;
def VOPProfileMAI_F64_16X16X4F64 : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64, AISrc_256_f64, ADst_256, AVSrc_64>;
def VOPProfileMAI_F64_4X4X4F64 : VOPProfileMAI<VOP_F64_F64_F64_F64, AISrc_64_f64, ADst_64, AVSrc_64>;
def VOPProfileMAI_I32_I64_X16 : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32, AISrc_128_b32, ADst_128, AVSrc_64>;
def VOPProfileMAI_I32_I64_X32 : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32, AISrc_512_b32, ADst_512, AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X16 : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X32 : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>;
def VOPProfileMAI_F32_I64_X32 : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>;
def VOPProfileMAI_F32_I64_X16 : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>;

// VGPR-destination ("VCD") variants of the MFMA profiles above.
def VOPProfileMAI_F32_F32_X4_VCD : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32, VISrc_128_f32, VDst_128>;
def VOPProfileMAI_F32_F32_X16_VCD : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32, VISrc_512_f32, VDst_512>;
def VOPProfileMAI_F32_F32_X32_VCD : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32, VISrc_1024_f32, VDst_1024>;
def VOPProfileMAI_I32_I32_X4_VCD : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32, VISrc_128_b32, VDst_128>;
def VOPProfileMAI_I32_I32_X16_VCD : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32, VISrc_512_b32, VDst_512>;
def VOPProfileMAI_I32_I32_X32_VCD : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32, VISrc_1024_b32, VDst_1024>;
def VOPProfileMAI_F32_V2I16_X4_VCD : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32, VISrc_128_b32, VDst_128>;
def VOPProfileMAI_F32_V2I16_X16_VCD : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, VISrc_512_b32, VDst_512>;
def VOPProfileMAI_F32_V2I16_X32_VCD : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, VISrc_1024_b32, VDst_1024>;
def VOPProfileMAI_F32_V4F16_X4_VCD : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X16_VCD : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X32_VCD : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X4_VCD : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X16_VCD : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>;
def VOPProfileMAI_F32_V4I16_X32_VCD : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>;
def VOPProfileMAI_F64_16X16X4F64_VCD : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64, VISrc_256_f64, VDst_256, AVSrc_64>;
def VOPProfileMAI_F64_4X4X4F64_VCD : VOPProfileMAI<VOP_F64_F64_F64_F64, VISrc_64_f64, VDst_64, AVSrc_64>;
def VOPProfileMAI_I32_I64_X16_VCD : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32, VISrc_128_b32, VDst_128, AVSrc_64>;
def VOPProfileMAI_I32_I64_X32_VCD : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32, VISrc_512_b32, VDst_512, AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X16_VCD : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>;
def VOPProfileMAI_F32_V2F32_X32_VCD : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>;
def VOPProfileMAI_F32_I64_X32_VCD : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>;
def VOPProfileMAI_F32_I64_X16_VCD : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>;

// Sparse MFMA (SMFMAC) profiles.
def VOPProfileSMFMAC_F32_16X16X32_F16 : VOPProfileSMFMAC<VOP_V4F32_V4F16_V8F16_I32, AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_32X32X16_F16 : VOPProfileSMFMAC<VOP_V16F32_V4F16_V8F16_I32, AVDst_512, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_16X16X32_I16 : VOPProfileSMFMAC<VOP_V4F32_V4I16_V8I16_I32, AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_32X32X16_I16 : VOPProfileSMFMAC<VOP_V16F32_V4I16_V8I16_I32, AVDst_512, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_I32_16X16X64_I8 : VOPProfileSMFMAC<VOP_V4I32_V2I32_V4I32_I32, AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_I32_32X32X32_I8 : VOPProfileSMFMAC<VOP_V16I32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_16X16X64_F8 : VOPProfileSMFMAC<VOP_V4F32_V2I32_V4I32_I32, AVDst_128, AVSrc_64, AVSrc_128>;
def VOPProfileSMFMAC_F32_32X32X32_F8 : VOPProfileSMFMAC<VOP_V16F32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>;

// Marker record attached to each MFMA pseudo: whether it is the "mac"
// (tied dst/src2) form and the name of its FMA counterpart.
class MFMATable <bit is_mac, string Name> {
  bit IsMac = is_mac;
  string FMAOp = Name;
}

// MFMA intrinsic wrapped in a PatFrag with an extra C++ predicate.
class MAIFrag<SDPatternOperator Op, code pred> : PatFrag <
  (ops node:$src0, node:$src1, node:$src2, node:$cbsz, node:$abid, node:$blgp),
  (Op $src0, $src1, $src2, $cbsz, $abid, $blgp),
  pred
>;

// SDag / GlobalISel predicate bodies choosing between the AGPR and VGPR
// MFMA forms based on SIMachineFunctionInfo::mayNeedAGPRs().
defvar MayNeedAGPRs = [{
  return MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
}];

defvar MayNeedAGPRs_gisel = [{
  return MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
}];

defvar MayNotNeedAGPRs = [{
  return !MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
}];

defvar MayNotNeedAGPRs_gisel = [{
  return !MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs();
}];

class AgprMAIFrag<SDPatternOperator Op> : MAIFrag<Op, MayNeedAGPRs> {
  let GISelPredicateCode = MayNeedAGPRs_gisel;
}

class VgprMAIFrag<SDPatternOperator Op> : MAIFrag<Op, MayNotNeedAGPRs> {
  let GISelPredicateCode = MayNotNeedAGPRs_gisel;
}

let SubtargetPredicate = HasMAIInsts in {

let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
  defm V_ACCVGPR_READ_B32 : VOP3Inst<"v_accvgpr_read_b32", VOPProfileAccRead>;
  let isMoveImm = 1 in {
    defm V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite>;
  } // End isMoveImm = 1
} // End isAsCheapAsAMove = 1, isReMaterializable = 1

// Single MFMA pseudo; Opcode/is_dgemm/is_gfx940_xdl feed instruction tables.
class MAIInst<string OpName, VOPProfile P, SDPatternOperator node>
  : VOP3InstBase<OpName, P, node> {
  Instruction Opcode = !cast<Instruction>(NAME);
  bit is_dgemm = 0;
  bit is_gfx940_xdl = 0;
}

// Emits the _e64 (AGPR) and GFX90A+ _vgprcd_e64 (VGPR dst/src2) forms, plus
// tied "_mac" variants when the profile forbids dst/src2 overlap.
multiclass MAIInst<string OpName, string P, SDPatternOperator node> {
  defvar NoDstOverlap = !cast<VOPProfileMAI>("VOPProfileMAI_" # P).NoDstOverlap;

  let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
    // FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
    let Constraints = !if(NoDstOverlap, "@earlyclobber $vdst", "") in {
      def _e64 : MAIInst<OpName, !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
                         !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, AgprMAIFrag<node>)>,
                 MFMATable<0, NAME # "_e64">;

      let SubtargetPredicate = isGFX90APlus, Mnemonic = OpName in
      def _vgprcd_e64 : MAIInst<OpName # "_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
                                !if(!or(NoDstOverlap, !eq(node, null_frag)), null_frag, VgprMAIFrag<node>)>,
                        MFMATable<0, NAME # "_vgprcd_e64">;
    }

    if NoDstOverlap then {
      let Constraints = !if(NoDstOverlap, "$vdst = $src2", ""),
          isConvertibleToThreeAddress = NoDstOverlap,
          Mnemonic = OpName in {
        def "_mac_e64" : MAIInst<OpName # "_mac", !cast<VOPProfileMAI>("VOPProfileMAI_" # P),
                                 !if(!eq(node, null_frag), null_frag, AgprMAIFrag<node>)>,
                         MFMATable<1, NAME # "_e64">;

        let SubtargetPredicate = isGFX90APlus in
        def _mac_vgprcd_e64 : MAIInst<OpName # "_mac_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"),
                                      !if(!eq(node, null_frag), null_frag, VgprMAIFrag<node>)>,
                              MFMATable<1, NAME # "_vgprcd_e64">;
      }
    }
  } // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1
}

defm V_MFMA_F32_4X4X1F32 : MAIInst<"v_mfma_f32_4x4x1f32", "F32_F32_X4", int_amdgcn_mfma_f32_4x4x1f32>;
defm V_MFMA_F32_16X16X1F32 : MAIInst<"v_mfma_f32_16x16x1f32", "F32_F32_X16", int_amdgcn_mfma_f32_16x16x1f32>;
defm V_MFMA_F32_16X16X4F32 : MAIInst<"v_mfma_f32_16x16x4f32", "F32_F32_X4", int_amdgcn_mfma_f32_16x16x4f32>;
defm V_MFMA_F32_32X32X1F32 : MAIInst<"v_mfma_f32_32x32x1f32", "F32_F32_X32", int_amdgcn_mfma_f32_32x32x1f32>;
defm V_MFMA_F32_32X32X2F32 : MAIInst<"v_mfma_f32_32x32x2f32", "F32_F32_X16", int_amdgcn_mfma_f32_32x32x2f32>;

let is_gfx940_xdl = 1 in {
defm V_MFMA_F32_4X4X4F16 : MAIInst<"v_mfma_f32_4x4x4f16", "F32_V4F16_X4", int_amdgcn_mfma_f32_4x4x4f16>;
defm V_MFMA_I32_4X4X4I8 : MAIInst<"v_mfma_i32_4x4x4i8", "I32_I32_X4", int_amdgcn_mfma_i32_4x4x4i8>;
defm V_MFMA_F32_16X16X4F16 : MAIInst<"v_mfma_f32_16x16x4f16", "F32_V4F16_X16", int_amdgcn_mfma_f32_16x16x4f16>;
defm V_MFMA_F32_16X16X16F16 : MAIInst<"v_mfma_f32_16x16x16f16", "F32_V4F16_X4", int_amdgcn_mfma_f32_16x16x16f16>;
defm V_MFMA_I32_16X16X4I8 : MAIInst<"v_mfma_i32_16x16x4i8", "I32_I32_X16", int_amdgcn_mfma_i32_16x16x4i8>;
defm V_MFMA_F32_32X32X4F16 : MAIInst<"v_mfma_f32_32x32x4f16", "F32_V4F16_X32", int_amdgcn_mfma_f32_32x32x4f16>;
defm V_MFMA_F32_32X32X8F16 : MAIInst<"v_mfma_f32_32x32x8f16", "F32_V4F16_X16", int_amdgcn_mfma_f32_32x32x8f16>;
defm V_MFMA_I32_32X32X4I8 : MAIInst<"v_mfma_i32_32x32x4i8", "I32_I32_X32", int_amdgcn_mfma_i32_32x32x4i8>;
}

let Predicates = [isGFX908orGFX90A] in {
defm V_MFMA_I32_16X16X16I8 : MAIInst<"v_mfma_i32_16x16x16i8", "I32_I32_X4", int_amdgcn_mfma_i32_16x16x16i8>;
defm V_MFMA_I32_32X32X8I8 : MAIInst<"v_mfma_i32_32x32x8i8", "I32_I32_X16", int_amdgcn_mfma_i32_32x32x8i8>;
defm V_MFMA_F32_4X4X2BF16 : MAIInst<"v_mfma_f32_4x4x2bf16", "F32_V2I16_X4", int_amdgcn_mfma_f32_4x4x2bf16>;
defm V_MFMA_F32_16X16X2BF16 : MAIInst<"v_mfma_f32_16x16x2bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_16x16x2bf16>;
defm V_MFMA_F32_16X16X8BF16 : MAIInst<"v_mfma_f32_16x16x8bf16", "F32_V2I16_X4", int_amdgcn_mfma_f32_16x16x8bf16>;
defm V_MFMA_F32_32X32X2BF16 :
MAIInst<"v_mfma_f32_32x32x2bf16", "F32_V2I16_X32", int_amdgcn_mfma_f32_32x32x2bf16>; 741defm V_MFMA_F32_32X32X4BF16 : MAIInst<"v_mfma_f32_32x32x4bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_32x32x4bf16>; 742} 743 744} // End SubtargetPredicate = HasMAIInsts 745 746let Predicates = [isGFX90APlus] in { 747 let is_gfx940_xdl = 1 in { 748 defm V_MFMA_F32_32X32X4BF16_1K : MAIInst<"v_mfma_f32_32x32x4bf16_1k", "F32_V4I16_X32", int_amdgcn_mfma_f32_32x32x4bf16_1k>; 749 defm V_MFMA_F32_16X16X4BF16_1K : MAIInst<"v_mfma_f32_16x16x4bf16_1k", "F32_V4I16_X16", int_amdgcn_mfma_f32_16x16x4bf16_1k>; 750 defm V_MFMA_F32_4X4X4BF16_1K : MAIInst<"v_mfma_f32_4x4x4bf16_1k", "F32_V4I16_X4", int_amdgcn_mfma_f32_4x4x4bf16_1k>; 751 defm V_MFMA_F32_32X32X8BF16_1K : MAIInst<"v_mfma_f32_32x32x8bf16_1k", "F32_V4I16_X16", int_amdgcn_mfma_f32_32x32x8bf16_1k>; 752 defm V_MFMA_F32_16X16X16BF16_1K : MAIInst<"v_mfma_f32_16x16x16bf16_1k", "F32_V4I16_X4", int_amdgcn_mfma_f32_16x16x16bf16_1k>; 753 } 754 755 let is_dgemm = 1 in { 756 defm V_MFMA_F64_16X16X4F64 : MAIInst<"v_mfma_f64_16x16x4f64", "F64_16X16X4F64", int_amdgcn_mfma_f64_16x16x4f64>; 757 defm V_MFMA_F64_4X4X4F64 : MAIInst<"v_mfma_f64_4x4x4f64", "F64_4X4X4F64", int_amdgcn_mfma_f64_4x4x4f64>; 758 } 759} // End Predicates = [isGFX90APlus] 760 761let SubtargetPredicate = isGFX940Plus, is_gfx940_xdl = 1 in { 762 defm V_MFMA_I32_32X32X16I8 : MAIInst<"v_mfma_i32_32x32x16i8", "I32_I64_X32", int_amdgcn_mfma_i32_32x32x16_i8>; 763 defm V_MFMA_I32_16X16X32I8 : MAIInst<"v_mfma_i32_16x16x32i8", "I32_I64_X16", int_amdgcn_mfma_i32_16x16x32_i8>; 764 defm V_MFMA_F32_16X16X8XF32 : MAIInst<"v_mfma_f32_16x16x8xf32", "F32_V2F32_X16", int_amdgcn_mfma_f32_16x16x8_xf32>; 765 defm V_MFMA_F32_32X32X4XF32 : MAIInst<"v_mfma_f32_32x32x4xf32", "F32_V2F32_X32", int_amdgcn_mfma_f32_32x32x4_xf32>; 766 defm V_MFMA_F32_16X16X32_BF8_BF8 : MAIInst<"v_mfma_f32_16x16x32_bf8_bf8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_bf8_bf8>; 767 defm V_MFMA_F32_16X16X32_BF8_FP8 : 
MAIInst<"v_mfma_f32_16x16x32_bf8_fp8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_bf8_fp8>; 768 defm V_MFMA_F32_16X16X32_FP8_BF8 : MAIInst<"v_mfma_f32_16x16x32_fp8_bf8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_fp8_bf8>; 769 defm V_MFMA_F32_16X16X32_FP8_FP8 : MAIInst<"v_mfma_f32_16x16x32_fp8_fp8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_fp8_fp8>; 770 defm V_MFMA_F32_32X32X16_BF8_BF8 : MAIInst<"v_mfma_f32_32x32x16_bf8_bf8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_bf8_bf8>; 771 defm V_MFMA_F32_32X32X16_BF8_FP8 : MAIInst<"v_mfma_f32_32x32x16_bf8_fp8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_bf8_fp8>; 772 defm V_MFMA_F32_32X32X16_FP8_BF8 : MAIInst<"v_mfma_f32_32x32x16_fp8_bf8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_fp8_bf8>; 773 defm V_MFMA_F32_32X32X16_FP8_FP8 : MAIInst<"v_mfma_f32_32x32x16_fp8_fp8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_fp8_fp8>; 774} // End SubtargetPredicate = isGFX940Plus, is_gfx940_xdl = 1 775 776multiclass SMFMACInst<string OpName, string P, SDPatternOperator node> { 777 let Constraints = "$vdst = $src2", DisableEncoding = "$src2", 778 isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1, is_gfx940_xdl = 1 in { 779 def _e64 : MAIInst<OpName, !cast<VOPProfileSMFMAC>("VOPProfileSMFMAC_" # P), node>; 780 } 781} 782 783let SubtargetPredicate = isGFX940Plus in { 784defm V_SMFMAC_F32_16X16X32_F16 : SMFMACInst<"v_smfmac_f32_16x16x32_f16", "F32_16X16X32_F16", int_amdgcn_smfmac_f32_16x16x32_f16>; 785defm V_SMFMAC_F32_32X32X16_F16 : SMFMACInst<"v_smfmac_f32_32x32x16_f16", "F32_32X32X16_F16", int_amdgcn_smfmac_f32_32x32x16_f16>; 786defm V_SMFMAC_F32_16X16X32_BF16 : SMFMACInst<"v_smfmac_f32_16x16x32_bf16", "F32_16X16X32_I16", int_amdgcn_smfmac_f32_16x16x32_bf16>; 787defm V_SMFMAC_F32_32X32X16_BF16 : SMFMACInst<"v_smfmac_f32_32x32x16_bf16", "F32_32X32X16_I16", int_amdgcn_smfmac_f32_32x32x16_bf16>; 788defm V_SMFMAC_I32_16X16X64_I8 : SMFMACInst<"v_smfmac_i32_16x16x64_i8", "I32_16X16X64_I8", int_amdgcn_smfmac_i32_16x16x64_i8>; 
defm V_SMFMAC_I32_32X32X32_I8 : SMFMACInst<"v_smfmac_i32_32x32x32_i8", "I32_32X32X32_I8", int_amdgcn_smfmac_i32_32x32x32_i8>;
defm V_SMFMAC_F32_16X16X64_BF8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_bf8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_bf8_bf8>;
defm V_SMFMAC_F32_16X16X64_BF8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_fp8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_bf8_fp8>;
defm V_SMFMAC_F32_16X16X64_FP8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_bf8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_fp8_bf8>;
defm V_SMFMAC_F32_16X16X64_FP8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_fp8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_fp8_fp8>;
defm V_SMFMAC_F32_32X32X32_BF8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_bf8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_bf8_bf8>;
defm V_SMFMAC_F32_32X32X32_BF8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_fp8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_bf8_fp8>;
defm V_SMFMAC_F32_32X32X32_FP8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_bf8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_fp8_bf8>;
defm V_SMFMAC_F32_32X32X32_FP8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_fp8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_fp8_fp8>;
}

// Generated table keyed by opcode; rows come from the MAIInst class fields
// (is_dgemm, is_gfx940_xdl). Looked up via getMAIInstInfoHelper.
def MAIInstInfoTable : GenericTable {
  let FilterClass = "MAIInst";
  let CppTypeName = "MAIInstInfo";
  let Fields = [
    "Opcode", "is_dgemm", "is_gfx940_xdl"
  ];

  let PrimaryKey = ["Opcode"];
  let PrimaryKeyName = "getMAIInstInfoHelper";
}

// Packed (2x32-bit) FP32/B32 VOP3P operations.
let isCommutable = 1, isReMaterializable = 1 in {
  let SubtargetPredicate = HasPackedFP32Ops in {
    defm V_PK_FMA_F32 : VOP3PInst<"v_pk_fma_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fma>;
    defm V_PK_MUL_F32 : VOP3PInst<"v_pk_mul_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fmul>;
    defm V_PK_ADD_F32 : VOP3PInst<"v_pk_add_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fadd>;
  } // End SubtargetPredicate = HasPackedFP32Ops

  let SubtargetPredicate = HasPkMovB32 in
  defm V_PK_MOV_B32 : VOP3PInst<"v_pk_mov_b32", VOP3P_Profile<VOP_V2I32_V2I32_V2I32, VOP3_PACKED>>;
} // End isCommutable = 1, isReMaterializable = 1

def : AMDGPUMnemonicAlias<"v_accvgpr_read", "v_accvgpr_read_b32">;
def : AMDGPUMnemonicAlias<"v_accvgpr_write", "v_accvgpr_write_b32">;

// GFX11 WMMA profile: the "_w32" suffix selects the 256-bit dst/src2 operand
// classes (wave32 layout), anything else the 128-bit ones (wave64 layout).
class VOPProfileWMMA<VOPProfile P, string Suffix, RegisterOperand _Src01RC64, bit _HasClamp, bit _HasOpSel> : VOP3P_Profile<P> {
  let DstRC = !if(!eq(Suffix, "_w32"), VDst_256, VDst_128);
  let Src0RC64 = _Src01RC64;
  let Src1RC64 = _Src01RC64;
  let Src2RC64 = !if(!eq(Suffix, "_w32"), VISrc_256_f64, VISrc_128_f32);
  let HasClamp = _HasClamp;
  let HasOpSel = _HasOpSel;
  let IsPacked = 1;
  let IsWMMA = 1;
}

// Type profiles for the GFX11 WMMA instructions: D, A, B, C vector types.
def VOP_V8F32_V16F16_V16F16_V8F32 : VOPProfile <[v8f32, v16f16, v16f16, v8f32]>;
def VOP_V8F32_V16I16_V16I16_V8F32 : VOPProfile <[v8f32, v16i16, v16i16, v8f32]>;
def VOP_V16F16_V16F16_V16F16_V16F16 : VOPProfile <[v16f16, v16f16, v16f16, v16f16]>;
def VOP_V16I16_V16I16_V16I16_V16I16 : VOPProfile <[v16i16, v16i16, v16i16, v16i16]>;
def VOP_V8I32_V4I32_V4I32_V8I32 : VOPProfile <[v8i32, v4i32, v4i32, v8i32]>;
def VOP_V8I32_V2I32_V2I32_V8I32 : VOPProfile <[v8i32, v2i32, v2i32, v8i32]>;

def VOP_V4F32_V16F16_V16F16_V4F32 : VOPProfile <[v4f32, v16f16, v16f16, v4f32]>;
def VOP_V4F32_V16I16_V16I16_V4F32 : VOPProfile <[v4f32, v16i16, v16i16, v4f32]>;
def VOP_V8F16_V16F16_V16F16_V8F16 : VOPProfile <[v8f16, v16f16, v16f16, v8f16]>;
def VOP_V8I16_V16I16_V16I16_V8I16 : VOPProfile <[v8i16, v16i16, v16i16, v8i16]>;
def VOP_V4I32_V4I32_V4I32_V4I32 : VOPProfile <[v4i32, v4i32, v4i32, v4i32]>;
def VOP_V4I32_V2I32_V2I32_V4I32 : VOPProfile <[v4i32, v2i32, v2i32, v4i32]>;

// Two-bit WMMA flavour descriptor: bit 0 = has clamp, bit 1 = has op_sel.
class WMMAType <bits<2> val> {
  bit hasClamp = val{0};
  bit hasOpsel = val{1};
}

def WMMARegular : WMMAType<0b00>;
def WMMAUIClamp : WMMAType<0b01>;
def WMMAOpSel : WMMAType<0b10>;

// Selection pattern for WMMARegular: all three sources take VOP3P modifiers.
class WMMARegularPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
  GCNPat < (P.DstVT (node
    (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)),
    (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)),
    (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers))
  )),
  (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, $src2_modifiers, P.Src2VT:$src2))
>;

// WMMAOpSel: only src2 carries a modifier operand; src0/src1 modifiers are
// the constant 8 in the output pattern.
class WMMAOpSelPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
  GCNPat < (P.DstVT (node
    (P.Src0VT P.Src0VT:$src0),
    (P.Src1VT P.Src1VT:$src1),
    (P.Src2VT P.Src2VT:$src2), (WMMAOpSelVOP3PMods i32:$src2_modifiers)
  )),
  (P.DstVT (Inst (i32 8), P.Src0VT:$src0, (i32 8), P.Src1VT:$src1, i32:$src2_modifiers, P.Src2VT:$src2))
>;

// WMMAUIClamp: src0/src1 take neg (signedness) modifiers and a clamp operand
// follows src2; the src2 modifier is the constant 8 in the output pattern.
class WMMAUIClampPat<Instruction Inst, SDPatternOperator node, VOPProfile P> :
  GCNPat < (P.DstVT (node
    (VOP3PModsNeg i32:$src0_modifiers), (P.Src0VT P.Src0VT:$src0),
    (VOP3PModsNeg i32:$src1_modifiers), (P.Src1VT P.Src1VT:$src1),
    (P.Src2VT P.Src2VT:$src2), (i1 timm:$clamp)
  )),
  (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, (i32 8), P.Src2VT:$src2, i1:$clamp))
>;

// Row type pairing the tied (two-address) and untied (three-address) WMMA
// pseudos; consumed by the mapping tables below.
class WMMAOpcodeMapping<Instruction TwoAddr, Instruction ThreeAddr> {
  Instruction Opcode2Addr = TwoAddr;
  Instruction Opcode3Addr = ThreeAddr;
  Predicate WaveSizePredicate;
}

def WMMAOpcode : GenericEnum {
  let FilterClass = "VOP3P_Pseudo";
}

class WMMAMappingTable : GenericTable {
  let FilterClass = "WMMAOpcodeMapping";
  let CppTypeName = "WMMAOpcodeMappingInfo";
  let Fields = ["Opcode2Addr", "Opcode3Addr"];
  string TypeOf_Opcode2Addr = "WMMAOpcode";
  string TypeOf_Opcode3Addr = "WMMAOpcode";
}

def WMMAOpcode2AddrMappingTable : WMMAMappingTable {
  let PrimaryKey = ["Opcode2Addr"];
  let PrimaryKeyName = "getWMMAMappingInfoFrom2AddrOpcode";
}

def WMMAOpcode3AddrMappingTable : WMMAMappingTable {
  let PrimaryKey = ["Opcode3Addr"];
  let PrimaryKeyName = "getWMMAMappingInfoFrom3AddrOpcode";
}

// The WMMA instruction has extra constraints:
// Matrices A and B cannot overlap with D. C cannot partially overlap with D,
// but it is OK for them to be the same (which is a typical case).
//
// We implement it as follows:
// 1) Map the intrinsic to the pseudo where D is tied to C ($vdst = $src2).
// 2) The pass twoaddressinstruction checks if src2 is live and if that is the case
//    it converts the default pseudo to the pseudo where src2 is not the same as vdst.
// 3) @earlyclobber on the destination satisfies the constraint during RA.

// Defines the _twoaddr (tied, default) pseudo, optionally the _threeaddr
// pseudo plus the 2addr<->3addr mapping row, and the GFX11-only selection
// pattern appropriate for the WMMAType.
multiclass WMMAInst<string Suffix, string Instr, VOPProfile P, SDPatternOperator node = null_frag, RegisterOperand _Src01RC64 = VRegSrc_256, WMMAType Type, bit convertibleTo3Addr> {

  defvar WMMAConstraints2Addr = "@earlyclobber $vdst,$vdst = $src2";
  defvar WMMAConstraints3Addr = "@earlyclobber $vdst";

  defvar WMMAProfile = VOPProfileWMMA<P, Suffix, _Src01RC64, Type.hasClamp, Type.hasOpsel>;
  let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
    let Constraints = WMMAConstraints2Addr, isConvertibleToThreeAddress = convertibleTo3Addr in {
      def _twoaddr # Suffix : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
    }
  }
  if convertibleTo3Addr then {
    let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
      let Constraints = WMMAConstraints3Addr, SchedRW = [Write32Bit, Write32Bit] in {
        def _threeaddr # Suffix : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
      }
    }
    def : WMMAOpcodeMapping<!cast<Instruction>(NAME # _twoaddr # Suffix),
                            !cast<Instruction>(NAME # _threeaddr # Suffix)>;
  }

  let SubtargetPredicate = isGFX11Only in {
    if !eq(Type, WMMAOpSel) then {
      def : WMMAOpSelPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
    } else if !eq(Type, WMMAUIClamp) then {
      def : WMMAUIClampPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
    } else {
      def : WMMARegularPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
    }
  }
}

let WaveSizePredicate = isWave32 in {
  defm V_WMMA_F32_16X16X16_F16 : WMMAInst<"_w32", "v_wmma_f32_16x16x16_f16", VOP_V8F32_V16F16_V16F16_V8F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F32_16X16X16_BF16 : WMMAInst<"_w32", "v_wmma_f32_16x16x16_bf16", VOP_V8F32_V16I16_V16I16_V8F32, int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F16_16X16X16_F16 : WMMAInst<"_w32", "v_wmma_f16_16x16x16_f16", VOP_V16F16_V16F16_V16F16_V16F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w32", "v_wmma_bf16_16x16x16_bf16", VOP_V16I16_V16I16_V16I16_V16I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_F16_16X16X16_F16_TIED : WMMAInst<"_w32", "v_wmma_f16_16x16x16_f16", VOP_V16F16_V16F16_V16F16_V16F16, int_amdgcn_wmma_f16_16x16x16_f16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_BF16_16X16X16_BF16_TIED : WMMAInst<"_w32", "v_wmma_bf16_16x16x16_bf16", VOP_V16I16_V16I16_V16I16_V16I16, int_amdgcn_wmma_bf16_16x16x16_bf16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_I32_16X16X16_IU8 : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu8", VOP_V8I32_V4I32_V4I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp, 1>;
  defm V_WMMA_I32_16X16X16_IU4 : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu4", VOP_V8I32_V2I32_V2I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64, WMMAUIClamp, 1>;
}

let WaveSizePredicate = isWave64 in {
  defm V_WMMA_F32_16X16X16_F16 : WMMAInst<"_w64", "v_wmma_f32_16x16x16_f16", VOP_V4F32_V16F16_V16F16_V4F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F32_16X16X16_BF16 : WMMAInst<"_w64", "v_wmma_f32_16x16x16_bf16", VOP_V4F32_V16I16_V16I16_V4F32, int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular, 1>;
  defm V_WMMA_F16_16X16X16_F16 : WMMAInst<"_w64", "v_wmma_f16_16x16x16_f16", VOP_V8F16_V16F16_V16F16_V8F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w64", "v_wmma_bf16_16x16x16_bf16", VOP_V8I16_V16I16_V16I16_V8I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel, 1>;
  defm V_WMMA_F16_16X16X16_F16_TIED : WMMAInst<"_w64", "v_wmma_f16_16x16x16_f16", VOP_V8F16_V16F16_V16F16_V8F16, int_amdgcn_wmma_f16_16x16x16_f16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_BF16_16X16X16_BF16_TIED : WMMAInst<"_w64", "v_wmma_bf16_16x16x16_bf16", VOP_V8I16_V16I16_V16I16_V8I16, int_amdgcn_wmma_bf16_16x16x16_bf16_tied, VRegSrc_256, WMMAOpSel, 0>;
  defm V_WMMA_I32_16X16X16_IU8 : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu8", VOP_V4I32_V4I32_V4I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp, 1>;
  defm V_WMMA_I32_16X16X16_IU4 : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu4", VOP_V4I32_V2I32_V2I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64, WMMAUIClamp, 1>;

}

// GFX12 WMMA/SWMMAC profile. ArgTy is [D, A, B, C]; the flag bits derive the
// operand lists (ins), asm string, and isel pattern fragments used by
// WMMAInstGFX12/SWMMACInstGFX12 and their patterns.
class VOP3PWMMA_Profile<list<ValueType> ArgTy, bit _IsSWMMAC, int _IndexType,
                        bit _IsIU, bit _IsFP8BF8>
  : VOP3P_Profile<VOPProfile<ArgTy>> {
  bit IsIU = _IsIU;
  bit IsFP8BF8 = _IsFP8BF8;
  // Neither integer nor fp8/bf8 implies f16-or-bf16 A/B matrices.
  bit IsF16BF16 = !not(!or(IsIU, IsFP8BF8));

  // 0 means no index operand; 8/16 select the index_key width (SWMMAC only).
  int IndexType = _IndexType;

  let IsPacked = 1;
  let IsWMMA = !not(_IsSWMMAC);
  let IsSWMMAC = _IsSWMMAC;

  // A/B element type: fp16 if ArgTy[1] is FP, bf16 if it is an int vector
  // (bf16 matrices are modelled with integer element types in this file).
  bit IsAB_F16 = !and(IsF16BF16, ArgTy[1].isFP);
  bit IsAB_BF16 = !and(IsF16BF16, isIntType<ArgTy[1]>.ret);
  bit IsC_F32 = !or(!eq(ArgTy[3], v8f32), !eq(ArgTy[3], v4f32));
  bit IsC_BF16 = !or(!eq(ArgTy[3], v8i16), !eq(ArgTy[3], v4i16));
  bit IsC_F16 = !or(!eq(ArgTy[3], v8f16), !eq(ArgTy[3], v4f16));

  // Which operands accept neg_lo/neg_hi (see the table in the comment below).
  bit NegLo01 = !or(IsF16BF16, IsIU);
  bit NegLo2 = !and(!or(IsF16BF16, IsFP8BF8), IsWMMA);
  bit NegHi01 = IsF16BF16;
  bit NegHi2 = !and(!or(IsF16BF16, IsFP8BF8), IsWMMA);
  bit NegLoAny = !or(NegLo01, NegLo2);
  bit NegHiAny = !or(NegHi01, NegHi2);

  // Register operand classes are looked up by bit size of the vector types.
  let DstRC = !cast<RegisterOperand>("VDst_"#ArgTy[0].Size);
  let Src0RC64 = !cast<RegisterOperand>("VRegSrc_"#ArgTy[1].Size);
  let Src1RC64 = !cast<RegisterOperand>("VRegSrc_"#ArgTy[2].Size);
  let Src2RC64 = !if(IsSWMMAC, DstRC,
                     !cast<RegisterOperand>("VISrc_"#ArgTy[3].Size#
                                            !cond(IsC_F32: "_f32",
                                                  IsC_F16: "_f16",
                                                  IsC_BF16: "_bf16",
                                                  1: "_b32")));

  // For f16 and bf16 matrices A and B, each element can be modified by
  // fneg(neg_lo,neg_hi = 1). For iu4 and iu8 matrices A and B neg_lo is
  // overloaded to mean unsigned/signed: neg_lo = 0 (u4 and u8) unsigned(zext)
  // neg_lo = 1 (i4 and i8) signed(sext). For f16, bf16 and f32 matrix C each
  // element can be modified by fneg(neg_lo = 1) or fabs(neg_hi = 1).

  // Opcode              | src0/src1 - matrix A/B | src2 - matrix C or Index
  // ---------------------------------------------------------------------------
  // wmma f32_f16        | both neg_lo,neg_hi = 1 | neg_lo = 1  neg C(f32)
  // wmma f32_bf16       | neg A/B (f16 or bf16)  | neg_hi = 1  abs C(f32)
  // ---------------------------------------------------------------------------
  // wmma f16_f16        | both neg_lo,neg_hi = 1 | neg_lo = 1  neg C(f16 or bf16)
  // wmma bf16_bf16      | neg A/B (f16 or bf16)  | neg_hi = 1  abs C(f16 or bf16)
  // ---------------------------------------------------------------------------
  // wmma i32_iu8/iu4    | neg_lo = 0 u4/u8(zext) | not allowed for
  //                     | neg_lo = 1 i4/i8(sext) | i32 matrices
  // ---------------------------------------------------------------------------
  // wmma f32_fp8/bf8    | not allowed for        | neg_lo = 1  neg C(f32)
  // (4 instructions)    | f8 and bf8 matrices    | neg_hi = 1  abs C(f32)
  // ---------------------------------------------------------------------------
  // swmmac f32_f16      | both neg_lo,neg_hi = 1 | not allowed for sparse matrix
  // swmmac f32_bf16     | neg A/B (f16 or bf16)  | A Index - matrix C is in dst
  // ---------------------------------------------------------------------------
  // swmmac f16_f16      | both neg_lo,neg_hi = 1 | not allowed for sparse matrix
  // swmmac bf16_bf16    | neg A/B (f16 or bf16)  | A Index - matrix C is in dst
  // ---------------------------------------------------------------------------
  // swmmac i32_iu8/iu4  | neg_lo = 0 u4/u8(zext) | not allowed for sparse matrix
  //                     | neg_lo = 1 i4/i8(sext) | A Index - matrix C is in dst
  // ---------------------------------------------------------------------------
  // swmmac f32_fp8/bf8  | not allowed for        | not allowed for sparse matrix
  // (4 instructions)    | f8 and bf8 matrices    | A Index - matrix C is in dst

  // pseudo

  // fp8bf8 wmmas don't use src (0 and 1) modifiers, iu use neg_lo, f16 and bf16
  // use neg_lo and neg_hi. iu wmmas (C is i32) don't use src 2 modifiers,
  // remaining wmmas(f16, bf16 and f8bf8) use neg_lo and neg_hi for C (C is f32
  // f16 or bf16). swmmac use index_key and don't use src 2 modifiers.

  dag Src0Mods = !if(IsFP8BF8, (ins), (ins PackedF16InputMods:$src0_modifiers));
  dag Src1Mods = !if(IsFP8BF8, (ins), (ins PackedF16InputMods:$src1_modifiers));
  dag Src2Mods = !if(IsIU, (ins), (ins PackedF16InputMods:$src2_modifiers));
  dag IndexKey = !cond(!eq(IndexType, 0) : (ins),
                       !eq(IndexType, 8) : (ins IndexKey8bit:$index_key_8bit),
                       !eq(IndexType, 16): (ins IndexKey16bit:$index_key_16bit));
  dag Clamp = !if(IsIU, (ins Clamp0:$clamp), (ins));
  dag Neg = !cond(!and(NegLoAny, NegHiAny) : (ins neg_lo0:$neg_lo, neg_hi0:$neg_hi),
                  !and(NegLoAny, !not(NegHiAny)) : (ins neg_lo0:$neg_lo),
                  !and(!not(NegLoAny), !not(NegHiAny)) : (ins));

  let InsVOP3P = !con(Src0Mods, (ins Src0RC64:$src0), Src1Mods, (ins Src1RC64:$src1),
                      !cond(IsWMMA : !con(Src2Mods, (ins Src2RC64:$src2)),
                            IsSWMMAC : !con((ins DstRC:$srcTiedDef), (ins VRegSrc_32:$src2), IndexKey)),
                      Clamp, Neg);

  // asm

  string IndexKeyAsm = !cond(!eq(IndexType, 0) : "",
                             !eq(IndexType, 8) : "$index_key_8bit",
                             !eq(IndexType, 16) : "$index_key_16bit");
  string ClampAsm = !if(IsIU, "$clamp", "");
  string NegAsm = !cond(!and(NegLoAny, NegHiAny) : "$neg_lo$neg_hi",
                        !and(NegLoAny, !not(NegHiAny)) : "$neg_lo",
                        !and(!not(NegLoAny), !not(NegHiAny)) : "");

  let AsmVOP3P = "$vdst, $src0, $src1, $src2"#IndexKeyAsm#NegAsm#ClampAsm;

  // isel patterns

  dag Src0InPat = !cond(IsAB_F16 : (ins (Src0VT (WMMAModsF16Neg Src0VT:$src0, i32:$src0_modifiers))),
                        IsAB_BF16 : (ins Src0VT:$src0),
                        IsIU : (ins (VOP3PModsNeg i32:$src0_modifiers), Src0VT:$src0),
                        IsFP8BF8 : (ins Src0VT:$src0));
  dag Src0OutPat = !cond(IsAB_F16 : (ins i32:$src0_modifiers, Src0VT:$src0),
                         IsAB_BF16 : (ins (i32 8), Src0VT:$src0),
                         IsIU : (ins i32:$src0_modifiers, Src0VT:$src0),
                         IsFP8BF8 : (ins Src0VT:$src0));
  dag Src1InPat = !cond(IsAB_F16 : (ins (Src1VT (WMMAModsF16Neg Src1VT:$src1, i32:$src1_modifiers))),
                        IsAB_BF16 : (ins Src1VT:$src1),
                        IsIU : (ins (VOP3PModsNeg i32:$src1_modifiers), Src1VT:$src1),
                        IsFP8BF8 : (ins Src1VT:$src1));
  dag Src1OutPat = !cond(IsAB_F16 : (ins i32:$src1_modifiers, Src1VT:$src1),
                         IsAB_BF16 : (ins (i32 8), Src1VT:$src1),
                         IsIU : (ins i32:$src1_modifiers, Src1VT:$src1),
                         IsFP8BF8 : (ins Src1VT:$src1));
  dag Src2InPatWmma = !cond(IsC_F32 : (ins (Src2VT (WMMAModsF32NegAbs Src2VT:$src2, i32:$src2_modifiers))),
                            IsC_F16 : (ins (Src2VT (WMMAModsF16NegAbs Src2VT:$src2, i32:$src2_modifiers))),
                            IsC_BF16 : (ins Src2VT:$src2),
                            IsIU : (ins Src2VT:$src2),
                            IsSWMMAC : (ins));
  dag Src2OutPatWmma = !cond(IsC_F32 : (ins i32:$src2_modifiers, Src2VT:$src2),
                             IsC_F16 : (ins i32:$src2_modifiers, Src2VT:$src2),
                             IsC_BF16 : (ins (i32 8), Src2VT:$src2),
                             IsIU : (ins Src2VT:$src2),
                             IsSWMMAC : (ins));
  dag ClampPat = !if(IsIU, (ins i1:$clamp), (ins));
  dag IndexInPat = !cond(!eq(IndexType, 0) : (ins i32:$src2),
                         !eq(IndexType, 8) : (ins (i32 (SWMMACIndex8 i32:$src2, i32:$index_key_8bit))),
                         !eq(IndexType, 16): (ins (i32 (SWMMACIndex16 i32:$src2, i32:$index_key_16bit))));
  dag IndexOutPat = !cond(!eq(IndexType, 0) : (ins i32:$src2),
                          !eq(IndexType, 8) : (ins i32:$src2, i32:$index_key_8bit),
                          !eq(IndexType, 16): (ins i32:$src2, i32:$index_key_16bit));
  dag Src2InlineInPat = (ins (Src2VT (WMMAVISrc Src2VT:$src2)));
  dag Src2InlineOutPat = !con(!if(IsIU, (ins), (ins (i32 8))), (ins Src2VT:$src2));


  dag WmmaInPat = !con(Src0InPat, Src1InPat, Src2InPatWmma, ClampPat);
  dag WmmaOutPat = !con(Src0OutPat, Src1OutPat, Src2OutPatWmma, ClampPat);

  dag SwmmacInPat = !con(Src0InPat, Src1InPat, (ins Src2VT:$srcTiedDef), IndexInPat, ClampPat);
  dag SwmmacOutPat = !con(Src0OutPat, Src1OutPat, (ins Src2VT:$srcTiedDef), IndexOutPat, ClampPat);

  // wmma pattern where src2 is inline imm uses _threeaddr pseudo,
  // can't use _twoaddr since it would violate src2 tied to vdst constraint.
  dag WmmaInlineInPat = !con(Src0InPat, Src1InPat, Src2InlineInPat, ClampPat);
  dag WmmaInlineOutPat = !con(Src0OutPat, Src1OutPat, Src2InlineOutPat, ClampPat);
}

// GFX12 WMMA: tied two-address pseudo (default, convertible) plus untied
// three-address pseudo, and the 2addr<->3addr mapping row. See the "extra
// constraints" comment above for why both forms exist.
multiclass WMMAInstGFX12<string Instr, VOP3PWMMA_Profile WMMAProfile, string PseudoInstrSuffix> {
  let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
    let Constraints = "@earlyclobber $vdst,$vdst = $src2", isConvertibleToThreeAddress = 1 in
    def _twoaddr : VOP3P_Pseudo<Instr, WMMAProfile>{
      let PseudoInstr = Instr#PseudoInstrSuffix;
    }

    let Constraints = "@earlyclobber $vdst", SchedRW = [Write32Bit, Write32Bit] in
    def _threeaddr : VOP3P_Pseudo<Instr, WMMAProfile>{
      let PseudoInstr = Instr#PseudoInstrSuffix;
    }

  }
  def : WMMAOpcodeMapping<!cast<Instruction>(NAME # _twoaddr),
                          !cast<Instruction>(NAME # _threeaddr)>;
}

// GFX12 sparse WMMA: single pseudo with dst tied to $srcTiedDef (matrix C
// lives in dst); assembly is matched via the cvtSWMMAC converter.
multiclass SWMMACInstGFX12<string Instr, VOP3PWMMA_Profile WMMAProfile, string PseudoInstrSuffix> {
  def _twoaddr : VOP3P_Pseudo<Instr, WMMAProfile>{
    let Mnemonic = Instr;
    let PseudoInstr = Instr#PseudoInstrSuffix;
    let mayRaiseFPException = 0;
    let ReadsModeReg = 0;
    let AsmMatchConverter = "cvtSWMMAC";

    let Constraints = "@earlyclobber $vdst,$vdst = $srcTiedDef";
  }
}

// First argument in Profile is types for matrices D, A, B and C (D = A * B + C)
// as used by llvm ir, types are vectors(with matrix elements)
// wave32:
// For 16x16 matrices, lanes 0 to 31 will have 8 matrix elts,
// for 16 x 32 16 elts and for 16 x 64 lanes have 32 elts.
1170// wave64: 1171// lanes will have half the size of elements in lanes compared to wave32 with 1172// exception of 16x16_iu4: lanes0-31 will have 8xi4, remaining lanes are ignored 1173 1174// general idea on element distribution differences: 1175// wave32: lane n has 8 matrix elements 1176// wave64: lane n has first 4, lane n+32 has other 4 elements 1177 1178// index size, for each 2 elements in lane you need 4bits in index 1179 1180// Non-standard types (iu8, iu4, fp8, bf8) will be packed in vectors of i32s. 1181// Original type for them is in comment on the right and refers to A and B. 1182 1183def F32_F16_WMMA_w32 : VOP3PWMMA_Profile<[v8f32, v8f16, v8f16, v8f32], 0, 0, 0, 0>; 1184def F32_BF16_WMMA_w32 : VOP3PWMMA_Profile<[v8f32, v8i16, v8i16, v8f32], 0, 0, 0, 0>; 1185def F16_F16_WMMA_w32 : VOP3PWMMA_Profile<[v8f16, v8f16, v8f16, v8f16], 0, 0, 0, 0>; 1186def BF16_BF16_WMMA_w32 : VOP3PWMMA_Profile<[v8i16, v8i16, v8i16, v8i16], 0, 0, 0, 0>; 1187def I32_IU8_WMMA_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 8xi8 1188def I32_IU4X16_WMMA_w32 : VOP3PWMMA_Profile<[v8i32, i32, i32, v8i32], 0, 0, 1, 0>; // 8xi4 1189def F32_FP8BF8_WMMA_w32 : VOP3PWMMA_Profile<[v8f32, v2i32, v2i32, v8f32], 0, 0, 0, 1>; // 8xf8 1190def I32_IU4X32_WMMA_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 16xi4 1191 1192def F32_F16_WMMA_w64 : VOP3PWMMA_Profile<[v4f32, v4f16, v4f16, v4f32], 0, 0, 0, 0>; 1193def F32_BF16_WMMA_w64 : VOP3PWMMA_Profile<[v4f32, v4i16, v4i16, v4f32], 0, 0, 0, 0>; 1194def F16_F16_WMMA_w64 : VOP3PWMMA_Profile<[v4f16, v4f16, v4f16, v4f16], 0, 0, 0, 0>; 1195def BF16_BF16_WMMA_w64 : VOP3PWMMA_Profile<[v4i16, v4i16, v4i16, v4i16], 0, 0, 0, 0>; 1196def I32_IU8_WMMA_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 4xi8 1197def I32_IU4X16_WMMA_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 8xi4 * 1198def F32_FP8BF8_WMMA_w64 : VOP3PWMMA_Profile<[v4f32, i32, i32, v4f32], 0, 0, 0, 1>; // 4xf8 1199def 
I32_IU4X32_WMMA_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 8xi4 1200 1201def F32_F16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v8f16, v16f16, v8f32], 1, 16, 0, 0>; 1202def F32_BF16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v8i16, v16i16, v8f32], 1, 16, 0, 0>; 1203def F16_F16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f16, v8f16, v16f16, v8f16], 1, 16, 0, 0>; 1204def BF16_BF16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i16, v8i16, v16i16, v8i16], 1, 16, 0, 0>; 1205def I32_IU8_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v4i32, v8i32], 1, 16, 1, 0>; // 8xi8, 16xi8 1206def I32_IU4X32_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, i32, v2i32, v8i32], 1, 16, 1, 0>; // 8xi4, 16xi4 1207def I32_IU4X64_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v4i32, v8i32], 1, 0, 1, 0>; // 16xi4, 32xi4 ** 1208def F32_FP8BF8_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v2i32, v4i32, v8f32], 1, 16, 0, 1>; // 8xf8, 16xf8 1209 1210def F32_F16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32, v4f16, v8f16, v4f32], 1, 8, 0, 0>; 1211def F32_BF16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32, v4i16, v8i16, v4f32], 1, 8, 0, 0>; 1212def F16_F16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f16, v4f16, v8f16, v4f16], 1, 8, 0, 0>; 1213def BF16_BF16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i16, v4i16, v8i16, v4i16], 1, 8, 0, 0>; 1214def I32_IU8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32, i32, v2i32, v4i32], 1, 8, 1, 0>; // 4xi8, 8xi8 1215def I32_IU4X32_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 1, 16, 1, 0>; // 8xi4, 8xi4 *** 1216def I32_IU4X64_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32, i32, v2i32, v4i32], 1, 16, 1, 0>; // 8xi4, 16xi4 1217def F32_FP8BF8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32, i32, v2i32, v4f32], 1, 8, 0, 1>; // 4xf8, 8xf8 1218 1219// * IU4X16_WMMA_w64 lanes 0-31 will have 8xi4, remaining lanes are ignored 1220// ** IU4X64_SWMMAC_w32 index is i32, index_key is not used 1221// *** IU4X32_SWMMAC_w64 lanes 0-31 will have 8xi4 remaining lanes are ignored 1222// for matrix A, index is i16; Matrix B uses all lanes 1223 1224let 
WaveSizePredicate = isWave32 in {
// GFX12 WMMA pseudo instructions, wave32 variants.
defm V_WMMA_F32_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w32, "_w32">;
defm V_WMMA_F32_16X16X16_BF16_w32    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w32, "_w32">;
defm V_WMMA_F16_16X16X16_F16_w32     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w32, "_w32">;
defm V_WMMA_BF16_16X16X16_BF16_w32   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w32, "_w32">;
defm V_WMMA_I32_16X16X16_IU8_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w32, "_w32">;
defm V_WMMA_I32_16X16X16_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w32, "_w32">;
defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w32, "_w32">;
defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w32, "_w32">;
defm V_WMMA_I32_16X16X32_IU4_w32     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w32, "_w32">;

// Sparse (SWMMAC) variants, wave32.
defm V_SWMMAC_F32_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w32, "_w32">;
defm V_SWMMAC_F32_16X16X32_BF16_w32    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w32, "_w32">;
defm V_SWMMAC_F16_16X16X32_F16_w32     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w32, "_w32">;
defm V_SWMMAC_BF16_16X16X32_BF16_w32   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w32, "_w32">;
defm V_SWMMAC_I32_16X16X32_IU8_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w32, "_w32">;
defm V_SWMMAC_I32_16X16X32_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w32, "_w32">;
defm V_SWMMAC_I32_16X16X64_IU4_w32     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w32, "_w32">;
defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w32, "_w32">;
defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w32, "_w32">;
} // End WaveSizePredicate = isWave32

let WaveSizePredicate = isWave64 in {
// GFX12 WMMA pseudo instructions, wave64 variants (same mnemonics, narrower
// register operands — see the _w64 profiles above).
defm V_WMMA_F32_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f32_16x16x16_f16",     F32_F16_WMMA_w64, "_w64">;
defm V_WMMA_F32_16X16X16_BF16_w64    : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf16",    F32_BF16_WMMA_w64, "_w64">;
defm V_WMMA_F16_16X16X16_F16_w64     : WMMAInstGFX12<"v_wmma_f16_16x16x16_f16",     F16_F16_WMMA_w64, "_w64">;
defm V_WMMA_BF16_16X16X16_BF16_w64   : WMMAInstGFX12<"v_wmma_bf16_16x16x16_bf16",   BF16_BF16_WMMA_w64, "_w64">;
defm V_WMMA_I32_16X16X16_IU8_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu8",     I32_IU8_WMMA_w64, "_w64">;
defm V_WMMA_I32_16X16X16_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x16_iu4",     I32_IU4X16_WMMA_w64, "_w64">;
defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_fp8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_fp8", F32_FP8BF8_WMMA_w64, "_w64">;
defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : WMMAInstGFX12<"v_wmma_f32_16x16x16_bf8_bf8", F32_FP8BF8_WMMA_w64, "_w64">;
defm V_WMMA_I32_16X16X32_IU4_w64     : WMMAInstGFX12<"v_wmma_i32_16x16x32_iu4",     I32_IU4X32_WMMA_w64, "_w64">;

// Sparse (SWMMAC) variants, wave64.
defm V_SWMMAC_F32_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_f16",     F32_F16_SWMMAC_w64, "_w64">;
defm V_SWMMAC_F32_16X16X32_BF16_w64    : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf16",    F32_BF16_SWMMAC_w64, "_w64">;
defm V_SWMMAC_F16_16X16X32_F16_w64     : SWMMACInstGFX12<"v_swmmac_f16_16x16x32_f16",     F16_F16_SWMMAC_w64, "_w64">;
defm V_SWMMAC_BF16_16X16X32_BF16_w64   : SWMMACInstGFX12<"v_swmmac_bf16_16x16x32_bf16",   BF16_BF16_SWMMAC_w64, "_w64">;
defm V_SWMMAC_I32_16X16X32_IU8_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu8",     I32_IU8_SWMMAC_w64, "_w64">;
defm V_SWMMAC_I32_16X16X32_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x32_iu4",     I32_IU4X32_SWMMAC_w64, "_w64">;
defm V_SWMMAC_I32_16X16X64_IU4_w64     : SWMMACInstGFX12<"v_swmmac_i32_16x16x64_iu4",     I32_IU4X64_SWMMAC_w64, "_w64">;
defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_fp8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_fp8", F32_FP8BF8_SWMMAC_w64, "_w64">;
defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 : SWMMACInstGFX12<"v_swmmac_f32_16x16x32_bf8_bf8", F32_FP8BF8_SWMMAC_w64, "_w64">;
} // End WaveSizePredicate = isWave64

// IsGFX11OpselIntrinsic: f16_f16 and bf16_bf16 Intrinsics have imm operand that
// controls opsel. Used by gfx11, removed in gfx12 (operand must be 0).
// Select a WMMA intrinsic to the instruction pair produced by WMMAInstGFX12:
// the plain pattern uses the "_twoaddr" pseudo, and a higher-complexity
// pattern (matching inline-immediate sources) uses the "_threeaddr" pseudo.
// IsGFX11OpselIntrinsic appends the mandatory-zero opsel immediate (see the
// comment above).
multiclass WMMAPat<string Inst, SDPatternOperator node, VOP3PWMMA_Profile P, bit IsGFX11OpselIntrinsic = 0> {
  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
                (P.DstVT !setdagop(P.WmmaOutPat, !cast<Instruction>(Inst#"_twoaddr")))>;
  let AddedComplexity = 4 in
  def : GCNPat <(P.DstVT !setdagop(!con(P.WmmaInlineInPat, !if(IsGFX11OpselIntrinsic, (ins 0), (ins))), node)),
                (P.DstVT !setdagop(P.WmmaInlineOutPat, !cast<Instruction>(Inst#"_threeaddr")))>;
}

// Select a sparse-WMMA (SWMMAC) intrinsic to a given instruction using the
// profile's input/output pattern fragments.
class SWMMACPat<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))>;

// Same as SWMMACPat but restricted to wave64.
class SWMMACPat_w64<Instruction Inst, SDPatternOperator node, VOP3PWMMA_Profile P> :
  GCNPat <(P.DstVT !setdagop(P.SwmmacInPat, node)),
          (P.DstVT !setdagop(P.SwmmacOutPat, Inst))>{
  let WaveSizePredicate = isWave64;
  }

let WaveSizePredicate = isWave32, SubtargetPredicate = isGFX12Plus in {
  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w32",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w32",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w32",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w32,1>;
  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w32",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w32,1>;
  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w32",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w32",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w32", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w32>;
  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w32",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w32>;

  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w32_twoaddr,   int_amdgcn_swmmac_f32_16x16x32_f16,   F32_F16_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w32_twoaddr,  int_amdgcn_swmmac_f32_16x16x32_bf16,  F32_BF16_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w32_twoaddr,   int_amdgcn_swmmac_f16_16x16x32_f16,   F16_F16_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w32_twoaddr, int_amdgcn_swmmac_bf16_16x16x32_bf16, BF16_BF16_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w32_twoaddr,   int_amdgcn_swmmac_i32_16x16x32_iu8,   I32_IU8_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w32_twoaddr,   int_amdgcn_swmmac_i32_16x16x32_iu4,   I32_IU4X32_SWMMAC_w32>;
  // x64_iu4 w32 is written out directly (not via SWMMACPat); its index is a
  // plain i32 and index_key is unused (see footnote ** above).
  def : GCNPat <(I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacInPat,  int_amdgcn_swmmac_i32_16x16x64_iu4)),
                (I32_IU4X64_SWMMAC_w32.DstVT !setdagop(I32_IU4X64_SWMMAC_w32.SwmmacOutPat, V_SWMMAC_I32_16X16X64_IU4_w32_twoaddr))>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w32>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w32_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w32>;
} // End WaveSizePredicate = isWave32, SubtargetPredicate = isGFX12Plus

let WaveSizePredicate = isWave64, SubtargetPredicate = isGFX12Plus in {
  defm : WMMAPat<"V_WMMA_F32_16X16X16_F16_w64",     int_amdgcn_wmma_f32_16x16x16_f16,     F32_F16_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF16_w64",    int_amdgcn_wmma_f32_16x16x16_bf16,    F32_BF16_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_F16_16X16X16_F16_w64",     int_amdgcn_wmma_f16_16x16x16_f16,     F16_F16_WMMA_w64,1>;
  defm : WMMAPat<"V_WMMA_BF16_16X16X16_BF16_w64",   int_amdgcn_wmma_bf16_16x16x16_bf16,   BF16_BF16_WMMA_w64,1>;
  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU8_w64",     int_amdgcn_wmma_i32_16x16x16_iu8,     I32_IU8_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_I32_16X16X16_IU4_w64",     int_amdgcn_wmma_i32_16x16x16_iu4,     I32_IU4X16_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_fp8, F32_FP8BF8_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_FP8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_fp8_bf8, F32_FP8BF8_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_FP8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_fp8, F32_FP8BF8_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_F32_16X16X16_BF8_BF8_w64", int_amdgcn_wmma_f32_16x16x16_bf8_bf8, F32_FP8BF8_WMMA_w64>;
  defm : WMMAPat<"V_WMMA_I32_16X16X32_IU4_w64",     int_amdgcn_wmma_i32_16x16x32_iu4,     I32_IU4X32_WMMA_w64>;

  def : SWMMACPat<V_SWMMAC_F32_16X16X32_F16_w64_twoaddr,   int_amdgcn_swmmac_f32_16x16x32_f16,   F32_F16_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF16_w64_twoaddr,  int_amdgcn_swmmac_f32_16x16x32_bf16,  F32_BF16_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_F16_16X16X32_F16_w64_twoaddr,   int_amdgcn_swmmac_f16_16x16x32_f16,   F16_F16_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_BF16_16X16X32_BF16_w64_twoaddr, int_amdgcn_swmmac_bf16_16x16x32_bf16, BF16_BF16_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU8_w64_twoaddr,   int_amdgcn_swmmac_i32_16x16x32_iu8,   I32_IU8_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_I32_16X16X32_IU4_w64_twoaddr,   int_amdgcn_swmmac_i32_16x16x32_iu4,   I32_IU4X32_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_I32_16X16X64_IU4_w64_twoaddr,   int_amdgcn_swmmac_i32_16x16x64_iu4,   I32_IU4X64_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_fp8, F32_FP8BF8_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_FP8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_fp8_bf8, F32_FP8BF8_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_FP8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_fp8, F32_FP8BF8_SWMMAC_w64>;
  def : SWMMACPat<V_SWMMAC_F32_16X16X32_BF8_BF8_w64_twoaddr, int_amdgcn_swmmac_f32_16x16x32_bf8_bf8, F32_FP8BF8_SWMMAC_w64>;
} // End WaveSizePredicate = isWave64, SubtargetPredicate = isGFX12Plus


//===----------------------------------------------------------------------===//
// Begin Real Encodings
//===----------------------------------------------------------------------===//

// MC real for the DPP16 form: inherits all behavioral properties from the
// pseudo it encodes.
class VOP3P_DPP16<bits<7> op, VOP_DPP_Pseudo ps, int subtarget,
                  string opName = ps.OpName>
    : VOP3P_DPP<op, opName, ps.Pfl, 1>, SIMCInstr<ps.PseudoInstr, subtarget> {
  let hasSideEffects = ps.hasSideEffects;
  let Defs = ps.Defs;
  let SchedRW = ps.SchedRW;
  let Uses = ps.Uses;
  let AssemblerPredicate = HasDPP16;
  let SubtargetPredicate = ps.SubtargetPredicate;
  let OtherPredicates = ps.OtherPredicates;
  let IsPacked = ps.IsPacked;
}

// MC real for the DPP8 form; same property forwarding as VOP3P_DPP16.
class VOP3P_DPP8_Base<bits<7> op, VOP_Pseudo ps, string opName = ps.OpName>
    : VOP3P_DPP8<op, opName, ps.Pfl> {
  let hasSideEffects = ps.hasSideEffects;
  let Defs = ps.Defs;
  let SchedRW = ps.SchedRW;
  let Uses = ps.Uses;
  let SubtargetPredicate = ps.SubtargetPredicate;
  let OtherPredicates = ps.OtherPredicates;
  let IsPacked = ps.IsPacked;
}

//===----------------------------------------------------------------------===//
// GFX11, GFX12
//===----------------------------------------------------------------------===//

// Plain (non-DPP) real encoding for one generation.
multiclass VOP3P_Real_Base<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                           string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  def Gen.Suffix :
    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;
}

// WMMA/SWMMAC encoding: repurposes the VOP3P opsel/opsel_hi/neg bits for the
// sparse index key, source negation modifiers, and (for integer variants)
// clamp.
class VOP3PeWmma<bits<7> op, VOPProfile P, VOP3PWMMA_Profile WMMAP>
    : VOP3Pe_gfx11_gfx12<op, P>{
  // opsel — carries the index_key operand for sparse (SWMMAC) variants;
  // IndexType selects between the 8-bit and 16-bit index-key operand.
  let Inst{11} = !cond(!eq(WMMAP.IndexType, 0)  : 0,
                       !eq(WMMAP.IndexType, 8)  : index_key_8bit{0},
                       !eq(WMMAP.IndexType, 16) : index_key_16bit{0});
  let Inst{12} = !if(!eq(WMMAP.IndexType, 8), index_key_8bit{1}, 0);
  let Inst{13} = 0;
  // opsel_hi — hard-wired to all ones.
  let Inst{59} = 1;
  let Inst{60} = 1;
  let Inst{14} = 1;
  // neg_lo
  let Inst{61} = !if(WMMAP.NegLo01, src0_modifiers{0}, 0);
  let Inst{62} = !if(WMMAP.NegLo01, src1_modifiers{0}, 0);
  let Inst{63} = !if(WMMAP.NegLo2, src2_modifiers{0}, 0);
  // neg_hi
  let Inst{8}  = !if(WMMAP.NegHi01, src0_modifiers{1}, 0);
  let Inst{9}  = !if(WMMAP.NegHi01, src1_modifiers{1}, 0);
  let Inst{10} = !if(WMMAP.NegHi2, src2_modifiers{1}, 0);
  // clamp — only encoded for the integer (IU) variants.
  let Inst{15} = !if(WMMAP.IsIU, clamp{0}, 0);
}

// Real encoding for a WMMA/SWMMAC pseudo using the VOP3PeWmma layout.
multiclass VOP3P_WMMA_Real_Base<GFXGen Gen, bits<7> op, VOP3PWMMA_Profile WMMAP,
                                string backing_ps_name = NAME,
                                string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  def Gen.Suffix :
    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
    VOP3PeWmma<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl, WMMAP>;
}

multiclass VOP3P_Real_WMMA_gfx12 <bits<7> op, VOP3PWMMA_Profile WMMAP> {
  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX12" in {
    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
  }
}

multiclass VOP3P_Real_WMMA_gfx12w64 <bits<7> op, VOP3PWMMA_Profile WMMAP> {
  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX12W64" in {
    defm _twoaddr : VOP3P_WMMA_Real_Base <GFX12Gen, op, WMMAP>;
  }
}

defm V_WMMA_F32_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x040, F32_F16_WMMA_w32>;
defm V_WMMA_F32_16X16X16_BF16_w32    : VOP3P_Real_WMMA_gfx12 <0x041, F32_BF16_WMMA_w32>;
defm V_WMMA_F16_16X16X16_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x042, F16_F16_WMMA_w32>;
defm V_WMMA_BF16_16X16X16_BF16_w32   : VOP3P_Real_WMMA_gfx12 <0x043, BF16_BF16_WMMA_w32>;
defm V_WMMA_I32_16X16X16_IU8_w32     : VOP3P_Real_WMMA_gfx12 <0x044, I32_IU8_WMMA_w32>;
defm V_WMMA_I32_16X16X16_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x045, I32_IU4X16_WMMA_w32>;
defm V_WMMA_F32_16X16X16_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x046, F32_FP8BF8_WMMA_w32>;
defm V_WMMA_F32_16X16X16_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x047, F32_FP8BF8_WMMA_w32>;
defm V_WMMA_F32_16X16X16_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x048, F32_FP8BF8_WMMA_w32>;
defm V_WMMA_F32_16X16X16_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x049, F32_FP8BF8_WMMA_w32>;
defm V_WMMA_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x04a, I32_IU4X32_WMMA_w32>;

defm V_WMMA_F32_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x040, F32_F16_WMMA_w64>;
defm V_WMMA_F32_16X16X16_BF16_w64    : VOP3P_Real_WMMA_gfx12w64 <0x041, F32_BF16_WMMA_w64>;
defm V_WMMA_F16_16X16X16_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x042, F16_F16_WMMA_w64>;
defm V_WMMA_BF16_16X16X16_BF16_w64   : VOP3P_Real_WMMA_gfx12w64 <0x043, BF16_BF16_WMMA_w64>;
defm V_WMMA_I32_16X16X16_IU8_w64     : VOP3P_Real_WMMA_gfx12w64 <0x044, I32_IU8_WMMA_w64>;
defm V_WMMA_I32_16X16X16_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x045, I32_IU4X16_WMMA_w64>;
defm V_WMMA_F32_16X16X16_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x046, F32_FP8BF8_WMMA_w64>;
defm V_WMMA_F32_16X16X16_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x047, F32_FP8BF8_WMMA_w64>;
defm V_WMMA_F32_16X16X16_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x048, F32_FP8BF8_WMMA_w64>;
defm V_WMMA_F32_16X16X16_BF8_BF8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x049, F32_FP8BF8_WMMA_w64>;
defm V_WMMA_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x04a, I32_IU4X32_WMMA_w64>;


defm V_SWMMAC_F32_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x050, F32_F16_SWMMAC_w32>;
defm V_SWMMAC_F32_16X16X32_BF16_w32    : VOP3P_Real_WMMA_gfx12 <0x051, F32_BF16_SWMMAC_w32>;
defm V_SWMMAC_F16_16X16X32_F16_w32     : VOP3P_Real_WMMA_gfx12 <0x052, F16_F16_SWMMAC_w32>;
defm V_SWMMAC_BF16_16X16X32_BF16_w32   : VOP3P_Real_WMMA_gfx12 <0x053, BF16_BF16_SWMMAC_w32>;
defm V_SWMMAC_I32_16X16X32_IU8_w32     : VOP3P_Real_WMMA_gfx12 <0x054, I32_IU8_SWMMAC_w32>;
defm V_SWMMAC_I32_16X16X32_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x055, I32_IU4X32_SWMMAC_w32>;
defm V_SWMMAC_I32_16X16X64_IU4_w32     : VOP3P_Real_WMMA_gfx12 <0x056, I32_IU4X64_SWMMAC_w32>;
defm V_SWMMAC_F32_16X16X32_FP8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x057, F32_FP8BF8_SWMMAC_w32>;
defm V_SWMMAC_F32_16X16X32_FP8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x058, F32_FP8BF8_SWMMAC_w32>;
defm V_SWMMAC_F32_16X16X32_BF8_FP8_w32 : VOP3P_Real_WMMA_gfx12 <0x059, F32_FP8BF8_SWMMAC_w32>;
defm V_SWMMAC_F32_16X16X32_BF8_BF8_w32 : VOP3P_Real_WMMA_gfx12 <0x05a, F32_FP8BF8_SWMMAC_w32>;

defm V_SWMMAC_F32_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x050, F32_F16_SWMMAC_w64>;
defm V_SWMMAC_F32_16X16X32_BF16_w64    : VOP3P_Real_WMMA_gfx12w64 <0x051, F32_BF16_SWMMAC_w64>;
defm V_SWMMAC_F16_16X16X32_F16_w64     : VOP3P_Real_WMMA_gfx12w64 <0x052, F16_F16_SWMMAC_w64>;
defm V_SWMMAC_BF16_16X16X32_BF16_w64   : VOP3P_Real_WMMA_gfx12w64 <0x053, BF16_BF16_SWMMAC_w64>;
defm V_SWMMAC_I32_16X16X32_IU8_w64     : VOP3P_Real_WMMA_gfx12w64 <0x054, I32_IU8_SWMMAC_w64>;
defm V_SWMMAC_I32_16X16X32_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x055, I32_IU4X32_SWMMAC_w64>;
defm V_SWMMAC_I32_16X16X64_IU4_w64     : VOP3P_Real_WMMA_gfx12w64 <0x056, I32_IU4X64_SWMMAC_w64>;
defm V_SWMMAC_F32_16X16X32_FP8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x057, F32_FP8BF8_SWMMAC_w64>;
defm V_SWMMAC_F32_16X16X32_FP8_BF8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x058, F32_FP8BF8_SWMMAC_w64>;
defm V_SWMMAC_F32_16X16X32_BF8_FP8_w64 : VOP3P_Real_WMMA_gfx12w64 <0x059, F32_FP8BF8_SWMMAC_w64>;
defm V_SWMMAC_F32_16X16X32_BF8_BF8_w64 :
VOP3P_Real_WMMA_gfx12w64 <0x05a, F32_FP8BF8_SWMMAC_w64>;

// Real encoding for an instruction whose assembly mnemonic differs from the
// pseudo's; also registers a mnemonic alias so the old name still assembles.
multiclass VOP3P_Real_with_name<GFXGen Gen, bits<7> op,
                                string backing_ps_name = NAME,
                                string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
  let AsmString = asmName # ps.AsmOperands in
  def Gen.Suffix :
    VOP3P_Real_Gen<!cast<VOP3P_Pseudo>(backing_ps_name), Gen, asmName>,
    VOP3Pe_gfx11_gfx12<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>;

  def : AMDGPUMnemonicAlias<ps.Mnemonic, asmName> {
    let AssemblerPredicate = Gen.AssemblerPredicate;
  }
}

// Real encoding for the DPP16 form of a VOP3P pseudo.
multiclass VOP3P_Real_dpp<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                          string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
  def _dpp#Gen.Suffix
      : VOP3P_DPP16<op, !cast<VOP_DPP_Pseudo>(backing_ps_name #"_dpp"),
                    Gen.Subtarget> {
    let AsmString = asmName #ps.Pfl.AsmVOP3DPP16;
    let DecoderNamespace = Gen.DecoderNamespace;
    let AssemblerPredicate = Gen.AssemblerPredicate;
  }
}

// Real encoding for the DPP8 form of a VOP3P pseudo.
multiclass VOP3P_Real_dpp8<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                           string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> {
  defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
  def _dpp8#Gen.Suffix : VOP3P_DPP8_Base<op, ps> {
    let AsmString = asmName #ps.Pfl.AsmVOP3DPP8;
    let DecoderNamespace = Gen.DecoderNamespace;
    let AssemblerPredicate = Gen.AssemblerPredicate;
  }
}

// All three encodings at once: plain e64, DPP16 and DPP8.
multiclass VOP3P_Realtriple<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
                            string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic>
    : VOP3P_Real_Base<Gen, op, backing_ps_name, asmName>,
      VOP3P_Real_dpp<Gen, op, backing_ps_name, asmName>,
      VOP3P_Real_dpp8<Gen, op, backing_ps_name, asmName>;

//===----------------------------------------------------------------------===//
// GFX12
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_gfx12<bits<7> op> : VOP3P_Real_Base<GFX12Gen, op>;

multiclass VOP3P_Real_with_name_gfx12<bits<7> op,
                                      string backing_ps_name = NAME,
                                      string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> :
  VOP3P_Real_with_name<GFX12Gen, op, backing_ps_name, asmName>;

// GFX12 renamed min/max to min_num/max_num; keep the old pseudos as backing.
defm V_PK_MIN_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1b, "V_PK_MIN_F16", "v_pk_min_num_f16">;
defm V_PK_MAX_NUM_F16 : VOP3P_Real_with_name_gfx12<0x1c, "V_PK_MAX_F16", "v_pk_max_num_f16">;

defm V_PK_MINIMUM_F16 : VOP3P_Real_gfx12<0x1d>;
defm V_PK_MAXIMUM_F16 : VOP3P_Real_gfx12<0x1e>;

defm V_DOT4_F32_FP8_BF8 : VOP3P_Realtriple<GFX12Gen, 0x24>;
defm V_DOT4_F32_BF8_FP8 : VOP3P_Realtriple<GFX12Gen, 0x25>;
defm V_DOT4_F32_FP8_FP8 : VOP3P_Realtriple<GFX12Gen, 0x26>;
defm V_DOT4_F32_BF8_BF8 : VOP3P_Realtriple<GFX12Gen, 0x27>;

//===----------------------------------------------------------------------===//
// GFX11
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_gfx11_gfx12<bits<7> op> :
  VOP3P_Real_Base<GFX11Gen, op>, VOP3P_Real_Base<GFX12Gen, op>;

defm V_DOT4_I32_IU8  : VOP3P_Real_gfx11_gfx12<0x16>;
defm V_DOT8_I32_IU4  : VOP3P_Real_gfx11_gfx12<0x18>;
defm V_DOT2_F32_BF16 : VOP3P_Real_gfx11_gfx12<0x1a>;

// GFX11 WMMA reals: wave32 and wave64 variants decode into separate
// namespaces since they share opcodes.
multiclass VOP3P_Real_WMMA <bits<7> op> {
  let WaveSizePredicate = isWave32, DecoderNamespace = "GFX11" in {
    defm _twoaddr_w32 : VOP3P_Real_Base <GFX11Gen, op>;
  }
  let WaveSizePredicate = isWave64, DecoderNamespace = "GFX11W64" in {
    defm _twoaddr_w64 : VOP3P_Real_Base <GFX11Gen, op>;
  }
}

defm V_WMMA_F32_16X16X16_F16   : VOP3P_Real_WMMA <0x040>;
defm V_WMMA_F32_16X16X16_BF16  : VOP3P_Real_WMMA <0x041>;
defm V_WMMA_F16_16X16X16_F16   : VOP3P_Real_WMMA <0x042>;
defm V_WMMA_BF16_16X16X16_BF16 :
VOP3P_Real_WMMA <0x043>;
defm V_WMMA_I32_16X16X16_IU8   : VOP3P_Real_WMMA <0x044>;
defm V_WMMA_I32_16X16X16_IU4   : VOP3P_Real_WMMA <0x045>;

//===----------------------------------------------------------------------===//
// GFX8 (VI)
//===----------------------------------------------------------------------===//

// Plain GFX8/GFX9 VOP3P real encoding.
multiclass VOP3P_Real_vi<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicate = HasVOP3PInsts;
    let DecoderNamespace = "GFX8";
    let VOP3P = 1;
  }
}

// MAI (matrix accelerator) real encoding; the op_sel_hi bits are left
// undefined in the encoding (marked '?').
multiclass VOP3P_Real_MAI<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
    let Inst{14} = ?; // op_sel_hi(2)
    let Inst{59} = ?; // op_sel_hi(0)
    let Inst{60} = ?; // op_sel_hi(1)
  }
}

let Constraints = "" in {
// GFX90A MFMA reals. NOTE(review): per the backing pseudo names, "_acd"
// appears to be the accumulator-register (AGPR) destination form and "_vcd"
// the "_vgprcd" (VGPR destination) form — confirm against VOP3Pe_MAI.
multiclass VOP3P_Real_MFMA_gfx90a<bits<7> op> {
  let SubtargetPredicate = isGFX90AOnly,
      AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A" in {
  def _gfx90a_acd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX90A>,
                    VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, 1>;

  def _gfx90a_vcd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64"), SIEncodingFamily.GFX90A>,
                    VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64").Pfl, 0>;
  } // End AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A"
}
}

// Assembler aliases from the renamed gfx940 mnemonic back to the original
// one, for both the acd and vcd encodings.
multiclass VOP3P_Real_MFMA_gfx940_aliases<string NameFrom, string NameTo, string Op,
                                          VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(Op # "_e64"),
                                          VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(Op # "_vgprcd" # "_e64"),
                                          VOPProfile Pfl_ACD = PS_ACD.Pfl,
                                          VOPProfile Pfl_VCD = PS_VCD.Pfl> {
  if !ne(NameFrom, NameTo) then {
    def : InstAlias <NameTo # " " # PS_ACD.AsmOperands,
                     (!cast<VOP3P_Real>(Op # "_gfx940_acd") Pfl_ACD.DstRC:$vdst,
                      Pfl_ACD.Src0RC64:$src0, Pfl_ACD.Src1RC64:$src1, Pfl_ACD.Src2RC64:$src2,
                      CBSZ:$cbsz, ABID:$abid, blgp:$blgp)>, PredicateControl;
    def : InstAlias <NameTo # " " # PS_VCD.AsmOperands,
                     (!cast<VOP3P_Real>(Op # "_gfx940_vcd") Pfl_VCD.DstRC:$vdst,
                      Pfl_VCD.Src0RC64:$src0, Pfl_VCD.Src1RC64:$src1, Pfl_VCD.Src2RC64:$src2,
                      CBSZ:$cbsz, ABID:$abid, blgp:$blgp)>, PredicateControl;
  }
}

// gfx940 MFMA reals plus mnemonic aliases (including the "_1k"-stripped
// spelling when the pseudo mnemonic contains "_1k").
multiclass VOP3P_Real_MFMA_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
  let AssemblerPredicate = isGFX940Plus,
      DecoderNamespace = "GFX940",
      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
                    VOP3Pe_MAI <op, PS_ACD.Pfl, 1>;

  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
                    VOP3Pe_MAI <op, PS_VCD.Pfl, 0>;
  } // End AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940"

  let SubtargetPredicate = isGFX940Plus in {
    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, PS_ACD.Mnemonic, NAME>;

    if !ne(!subst("_1k", "", PS_ACD.Mnemonic), PS_ACD.Mnemonic) then
      defm : VOP3P_Real_MFMA_gfx940_aliases<Name, !subst("_1k", "", PS_ACD.Mnemonic), NAME>;
  }
}

// Pre-GFX90A (VI/GFX9) MFMA real encoding.
multiclass VOP3P_Real_MFMA_vi<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
    let SubtargetPredicate = isGFX8GFX9NotGFX90A;
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
    let Constraints = "";
  }
}

multiclass VOP3P_Real_MFMA_vi_gfx90a<bits<7> op> :
  VOP3P_Real_MFMA_gfx90a <op>,
  VOP3P_Real_MFMA_vi <op>;

multiclass VOP3P_Real_MFMA<bits<7> op, string GFX940Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> :
  VOP3P_Real_MFMA_vi_gfx90a <op>,
  VOP3P_Real_MFMA_gfx940 <op, GFX940Name>;

// Sparse MFMA (SMFMAC) gfx940 real plus a mnemonic alias for the short
// spelling (e.g. "v_smfmac_f32_16x16x32f16").
multiclass VOP3P_Real_SMFMAC<bits<7> op, string alias> {
  def _gfx940 : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
                VOP3Pe_SMFMAC <op> {
    let AssemblerPredicate = isGFX940Plus;
    let DecoderNamespace = "GFX8";
  }
  def : AMDGPUMnemonicAlias<alias, !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> {
    let AssemblerPredicate = isGFX940Plus;
  }
}

let SubtargetPredicate = isGFX8GFX9 in {
defm V_PK_MAD_I16 : VOP3P_Real_vi <0x00>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x01>;
defm V_PK_ADD_I16 : VOP3P_Real_vi <0x02>;
defm V_PK_SUB_I16 : VOP3P_Real_vi <0x03>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x04>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x05>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x06>;
defm V_PK_MAX_I16 : VOP3P_Real_vi <0x07>;
defm V_PK_MIN_I16 : VOP3P_Real_vi <0x08>;
defm V_PK_MAD_U16 : VOP3P_Real_vi <0x09>;

defm V_PK_ADD_U16 : VOP3P_Real_vi <0x0a>;
defm V_PK_SUB_U16 : VOP3P_Real_vi <0x0b>;
defm V_PK_MAX_U16 : VOP3P_Real_vi <0x0c>;
defm V_PK_MIN_U16 : VOP3P_Real_vi <0x0d>;
defm V_PK_FMA_F16 : VOP3P_Real_vi <0x0e>;
defm V_PK_ADD_F16 : VOP3P_Real_vi <0x0f>;
defm V_PK_MUL_F16 : VOP3P_Real_vi <0x10>;
defm V_PK_MIN_F16 : VOP3P_Real_vi <0x11>;
defm V_PK_MAX_F16 : VOP3P_Real_vi <0x12>;

let OtherPredicates = [HasMadMixInsts] in {
defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x20>;
defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x21>;
defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x22>;
}

let OtherPredicates = [HasFmaMixInsts],
    DecoderNamespace = "GFX9_DL" in {
// The mad_mix instructions were renamed and their behaviors changed,
// but the opcode stayed the same so we need to put these in a
// different DecoderNamespace to avoid the ambiguity.
defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x20>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x21>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x22>;
} // End OtherPredicates = [HasFmaMixInsts], DecoderNamespace = "GFX9_DL"

defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x26>;
defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x27>;

defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x23>;
defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x29>;
defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x2b>;

defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x28>;
defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x2a>;
} // End SubtargetPredicate = isGFX8GFX9

let OtherPredicates = [HasMAIInsts] in {

defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x58>;
defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x59>;
// Second argument is the renamed gfx940 mnemonic for the same opcode.
defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MFMA <0x40, "v_mfma_f32_32x32x1_2b_f32">;
defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MFMA <0x41, "v_mfma_f32_16x16x1_4b_f32">;
defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MFMA <0x42, "v_mfma_f32_4x4x1_16b_f32">;
defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MFMA <0x44, "v_mfma_f32_32x32x2_f32">;
defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MFMA <0x45, "v_mfma_f32_16x16x4_f32">;
defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MFMA <0x48, "v_mfma_f32_32x32x4_2b_f16">;
defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MFMA <0x49, "v_mfma_f32_16x16x4_4b_f16">;
defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MFMA <0x4a, "v_mfma_f32_4x4x4_16b_f16">;
defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MFMA <0x4c, "v_mfma_f32_32x32x8_f16">;
defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MFMA <0x4d, "v_mfma_f32_16x16x16_f16">;
defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MFMA <0x50, "v_mfma_i32_32x32x4_2b_i8">;
defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MFMA <0x51, "v_mfma_i32_16x16x4_4b_i8">;
defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MFMA <0x52, "v_mfma_i32_4x4x4_16b_i8">;

defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MFMA_vi_gfx90a <0x55>;
defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MFMA_vi_gfx90a <0x54>;
defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x68>;
defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x69>;
defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MFMA_vi_gfx90a <0x6b>;
defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6c>;
defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MFMA_vi_gfx90a <0x6d>;

} // End OtherPredicates = [HasMAIInsts]

defm V_MFMA_F32_32X32X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x63>;
defm V_MFMA_F32_16X16X4BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x64>;
defm V_MFMA_F32_4X4X4BF16_1K    : VOP3P_Real_MFMA_gfx90a <0x65>;
defm V_MFMA_F32_32X32X8BF16_1K  : VOP3P_Real_MFMA_gfx90a <0x66>;
defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx90a <0x67>;
defm V_MFMA_F64_16X16X4F64      : VOP3P_Real_MFMA_gfx90a <0x6e>;
defm V_MFMA_F64_4X4X4F64        : VOP3P_Real_MFMA_gfx90a <0x6f>;

defm V_MFMA_I32_32X32X16I8       : VOP3P_Real_MFMA_gfx940 <0x56, "v_mfma_i32_32x32x16_i8">;
defm V_MFMA_I32_16X16X32I8       : VOP3P_Real_MFMA_gfx940 <0x57, "v_mfma_i32_16x16x32_i8">;
defm V_MFMA_F32_16X16X8XF32      : VOP3P_Real_MFMA_gfx940 <0x3e, "v_mfma_f32_16x16x8_xf32">;
defm V_MFMA_F32_32X32X4XF32      : VOP3P_Real_MFMA_gfx940 <0x3f, "v_mfma_f32_32x32x4_xf32">;
defm V_MFMA_F32_16X16X32_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x70>;
defm V_MFMA_F32_16X16X32_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x71>;
defm V_MFMA_F32_16X16X32_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x72>;
defm V_MFMA_F32_16X16X32_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x73>;
defm V_MFMA_F32_32X32X16_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x74>;
defm V_MFMA_F32_32X32X16_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x75>;
defm V_MFMA_F32_32X32X16_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x76>;
defm V_MFMA_F32_32X32X16_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x77>;

defm V_MFMA_F32_32X32X4BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x5d, "v_mfma_f32_32x32x4_2b_bf16">;
defm V_MFMA_F32_16X16X4BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x5e, "v_mfma_f32_16x16x4_4b_bf16">;
defm V_MFMA_F32_4X4X4BF16_1K    : VOP3P_Real_MFMA_gfx940 <0x5f, "v_mfma_f32_4x4x4_16b_bf16">;
defm V_MFMA_F32_32X32X8BF16_1K  : VOP3P_Real_MFMA_gfx940 <0x60, "v_mfma_f32_32x32x8_bf16">;
defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx940 <0x61, "v_mfma_f32_16x16x16_bf16">;

defm V_MFMA_F64_16X16X4F64 : VOP3P_Real_MFMA_gfx940 <0x6e, "v_mfma_f64_16x16x4_f64">;
defm V_MFMA_F64_4X4X4F64   : VOP3P_Real_MFMA_gfx940 <0x6f, "v_mfma_f64_4x4x4_4b_f64">;

defm V_SMFMAC_F32_16X16X32_F16     : VOP3P_Real_SMFMAC <0x62, "v_smfmac_f32_16x16x32f16">;
defm V_SMFMAC_F32_32X32X16_F16     : VOP3P_Real_SMFMAC <0x64, "v_smfmac_f32_32x32x16f16">;
defm V_SMFMAC_F32_16X16X32_BF16    : VOP3P_Real_SMFMAC <0x66, "v_smfmac_f32_16x16x32bf16">;
defm V_SMFMAC_F32_32X32X16_BF16    : VOP3P_Real_SMFMAC <0x68, "v_smfmac_f32_32x32x16bf16">;
defm V_SMFMAC_I32_16X16X64_I8      : VOP3P_Real_SMFMAC <0x6a, "v_smfmac_i32_16x16x64i8">;
defm V_SMFMAC_I32_32X32X32_I8      : VOP3P_Real_SMFMAC <0x6c, "v_smfmac_i32_32x32x32i8">;
defm V_SMFMAC_F32_16X16X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x78, "v_smfmac_f32_16x16x64bf8bf8">;
defm V_SMFMAC_F32_16X16X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x79, "v_smfmac_f32_16x16x64bf8fp8">;
defm V_SMFMAC_F32_16X16X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x7a, "v_smfmac_f32_16x16x64fp8bf8">;
defm V_SMFMAC_F32_16X16X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x7b, "v_smfmac_f32_16x16x64fp8fp8">;
defm V_SMFMAC_F32_32X32X32_BF8_BF8 : VOP3P_Real_SMFMAC <0x7c, "v_smfmac_f32_32x32x32bf8bf8">;
defm V_SMFMAC_F32_32X32X32_BF8_FP8 : VOP3P_Real_SMFMAC <0x7d, "v_smfmac_f32_32x32x32bf8fp8">;
defm V_SMFMAC_F32_32X32X32_FP8_BF8 : VOP3P_Real_SMFMAC <0x7e, "v_smfmac_f32_32x32x32fp8bf8">;
defm V_SMFMAC_F32_32X32X32_FP8_FP8 : VOP3P_Real_SMFMAC <0x7f, "v_smfmac_f32_32x32x32fp8fp8">;

defm V_PK_FMA_F32 : VOP3P_Real_vi <0x30>;
defm V_PK_MUL_F32 : VOP3P_Real_vi <0x31>;
defm V_PK_ADD_F32 : VOP3P_Real_vi <0x32>;
defm V_PK_MOV_B32 : VOP3P_Real_vi <0x33>;

//===----------------------------------------------------------------------===//
// GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1 in {
  // Instantiate the GFX10 real encoding (suffix "_gfx10") for the pseudo
  // named NAME, using the VOP3Pe_gfx10 encoding with the given 7-bit opcode.
  multiclass VOP3P_Real_gfx10<bits<7> op> {
    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
  }
} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1

// Convenience multiclasses producing real encodings for multiple
// generations at once (same opcode on each generation).
multiclass VOP3P_Real_gfx10_gfx11<bits<7> op> :
  VOP3P_Real_gfx10<op>, VOP3P_Real_Base<GFX11Gen, op>;

multiclass VOP3P_Real_gfx10_gfx11_gfx12<bits<7> op> :
  VOP3P_Real_gfx10_gfx11<op>, VOP3P_Real_Base<GFX12Gen, op>;

// "Triple" uses VOP3P_Realtriple for GFX11/GFX12 instead of the base
// real. NOTE(review): VOP3P_Realtriple is defined elsewhere; presumably it
// also emits the DPP/DPP8 forms — confirm against its definition.
multiclass VOP3P_Real_gfx10_gfx11_gfx12_Triple<bits<7> op> :
  VOP3P_Real_gfx10<op>, VOP3P_Realtriple<GFX11Gen, op>,
  VOP3P_Realtriple<GFX12Gen, op>;

// Packed 16-bit integer/float ops present on GFX10 through GFX12.
defm V_PK_MAD_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x00>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_gfx10_gfx11_gfx12<0x01>;
defm V_PK_ADD_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x02>;
defm V_PK_SUB_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x03>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x04>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10_gfx11_gfx12<0x05>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x06>;
defm V_PK_MAX_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x07>;
defm V_PK_MIN_I16 : VOP3P_Real_gfx10_gfx11_gfx12<0x08>;
defm V_PK_MAD_U16 : VOP3P_Real_gfx10_gfx11_gfx12<0x09>;
defm V_PK_ADD_U16 : VOP3P_Real_gfx10_gfx11_gfx12<0x0a>;
defm V_PK_SUB_U16 : VOP3P_Real_gfx10_gfx11_gfx12<0x0b>;
defm V_PK_MAX_U16 : VOP3P_Real_gfx10_gfx11_gfx12<0x0c>;
defm V_PK_MIN_U16 : VOP3P_Real_gfx10_gfx11_gfx12<0x0d>;
defm V_PK_FMA_F16 : VOP3P_Real_gfx10_gfx11_gfx12<0x0e>;
defm V_PK_ADD_F16 : VOP3P_Real_gfx10_gfx11_gfx12<0x0f>;
defm V_PK_MUL_F16 : VOP3P_Real_gfx10_gfx11_gfx12<0x10>;
// PK_MIN/MAX_F16 stop at GFX11 (no GFX12 encoding is defined here).
defm V_PK_MIN_F16 : VOP3P_Real_gfx10_gfx11<0x11>;
defm V_PK_MAX_F16 : VOP3P_Real_gfx10_gfx11<0x12>;
// FMA_MIX family uses the Triple variant for GFX11/GFX12.
defm V_FMA_MIX_F32 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x20>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x21>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x22>;

// Signed 16-bit dot products are GFX10-only in this encoding scheme.
defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x14>;
defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x15>;

defm V_DOT2_F32_F16 : VOP3P_Real_gfx10_gfx11_gfx12_Triple<0x13>;
defm V_DOT4_U32_U8 : VOP3P_Real_gfx10_gfx11_gfx12<0x17>;
defm V_DOT8_U32_U4 : VOP3P_Real_gfx10_gfx11_gfx12<0x19>;

// Signed 8-/4-bit dot products are likewise GFX10-only here.
defm V_DOT4_I32_I8 : VOP3P_Real_gfx10 <0x16>;
defm V_DOT8_I32_I4 : VOP3P_Real_gfx10 <0x18>;