//===-- VOP3PInstructions.td - Vector Instruction Definitions -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP3P Classes
//===----------------------------------------------------------------------===//

// Base operand/asm profile for VOP3P (packed) encoded instructions.
// HasDPP additionally enables the extended VOP3 DPP form on subtargets
// that support it (see HasExtVOP3DPP uses below).
class VOP3P_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
                    bit HasDPP = 0> : VOP3_Profile<P, Features> {
  let IsVOP3P = 1;
  let HasExtVOP3DPP = HasDPP;
  // We do not want to print src modifiers for vop3p because the bits are
  // overloaded in meaning and the logic in printOperandAndFPInputMods is
  // wrong for vop3p
  let AsmVOP3Base = AsmVOP3P;
}

// Used for FMA_MIX* and MAD_MIX* insts
// Their operands are only sort of f16 operands. Depending on
// op_sel_hi, these may be interpreted as f32. The inline immediate
// values are really f16 converted to f32, so we treat these as f16
// operands.
class VOP3P_Mix_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR,
                        bit useTiedOutput = 0> : VOP3P_Profile<P, Features, 1> {
  // When set, an extra $vdst_in operand is added and tied to $vdst (used by
  // the mixlo/mixhi variants, which only write part of the destination).
  bit UseTiedOutput = useTiedOutput;

  // Regular (non-DPP) sources: three modifier/value pairs of f16 operands.
  dag srcs =
      (ins FP16InputMods:$src0_modifiers, VCSrc_f16:$src0,
           FP16InputMods:$src1_modifiers, VCSrc_f16:$src1,
           FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);
  // DPP sources: src0/src1 are restricted to 32-bit VGPRs.
  dag dpp_srcs =
      (ins FPVRegInputMods:$src0_modifiers, VGPRSrc_32:$src0,
           FPVRegInputMods:$src1_modifiers, VGPRSrc_32:$src1,
           FP16InputMods:$src2_modifiers, VCSrc_f16:$src2);

  // FIXME: clampmod0 misbehaves with the non-default vdst_in
  // following it. For now workaround this by requiring clamp
  // in tied patterns. This should use undef_tied_input, but it
  // seems underdeveloped and doesn't apply the right register
  // class constraints.
  dag mods = !con(!if(UseTiedOutput, (ins clampmod:$clamp, VGPR_32:$vdst_in),
                      (ins clampmod0:$clamp)),
                  (ins op_sel0:$op_sel, op_sel_hi0:$op_sel_hi));
  // We use Ins64 because that is the one which populates InOperandList
  // due to the logic in class VOP3_Pseudo
  let Ins64 = !con(srcs, mods);
  let InsVOP3Base = !con(dpp_srcs, mods);
  let AsmVOP3Base =
    "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
}

// Emits the VOP3P pseudo for OpName and, on GFX11+, a companion VOP3 DPP
// pseudo when the profile enables the extended DPP form. IsDOT selects the
// DOT-style variant of the modifier selection pattern.
multiclass VOP3PInst<string OpName, VOPProfile P,
                     SDPatternOperator node = null_frag, bit IsDOT = 0> {
  def NAME : VOP3P_Pseudo<OpName, P,
                          !if (P.HasModifiers,
                               getVOP3PModPat<P, node, IsDOT, IsDOT>.ret,
                               getVOP3Pat<P, node>.ret)>;
  let SubtargetPredicate = isGFX11Plus in {
    if P.HasExtVOP3DPP then
      def _dpp : VOP3_DPP_Pseudo<OpName, P> {
        let VOP3P = 1;
        let PseudoInstr = OpName #"_dpp";
      }
  } // end SubtargetPredicate = isGFX11Plus
}
// Non-packed instructions that use the VOP3P encoding.
// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
// When the profile requests a tied output, $vdst is constrained to the
// $vdst_in operand, which is excluded from the encoding.
multiclass VOP3_VOP3PInst<string OpName, VOP3P_Mix_Profile P> {
  def NAME : VOP3P_Pseudo<OpName, P> {
    let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
    let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
  }
  let SubtargetPredicate = isGFX11Plus in {
    if P.HasExtVOP3DPP then
      def _dpp : VOP3_DPP_Pseudo<OpName, P> {
        let VOP3P = 1;
        let PseudoInstr = OpName#"_dpp";
        let Constraints = !if(P.UseTiedOutput, "$vdst = $vdst_in", "");
        let DisableEncoding = !if(P.UseTiedOutput, "$vdst_in", "");
      }
  } // end SubtargetPredicate = isGFX11Plus
}

// Packed 16-bit ALU instructions (two lanes per 32-bit register).
let isReMaterializable = 1 in {
let isCommutable = 1 in {
defm V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
defm V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;

let FPDPRounding = 1 in {
defm V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, any_fma>;
defm V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fadd>;
defm V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, any_fmul>;
} // End FPDPRounding = 1
defm V_PK_MAX_F16 : VOP3PInst<"v_pk_max_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fmaxnum_like>;
defm V_PK_MIN_F16 : VOP3PInst<"v_pk_min_f16", VOP3P_Profile<VOP_V2F16_V2F16_V2F16>, fminnum_like>;

defm V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, add>;
defm V_PK_ADD_I16 : VOP3PInst<"v_pk_add_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
defm V_PK_MUL_LO_U16 : VOP3PInst<"v_pk_mul_lo_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, mul>;

defm V_PK_MIN_I16 : VOP3PInst<"v_pk_min_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smin>;
defm V_PK_MIN_U16 : VOP3PInst<"v_pk_min_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umin>;
defm V_PK_MAX_I16 : VOP3PInst<"v_pk_max_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, smax>;
defm V_PK_MAX_U16 : VOP3PInst<"v_pk_max_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, umax>;
} // End isCommutable = 1

defm V_PK_SUB_U16 : VOP3PInst<"v_pk_sub_u16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>>;
defm V_PK_SUB_I16 : VOP3PInst<"v_pk_sub_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, sub>;

defm V_PK_LSHLREV_B16 : VOP3PInst<"v_pk_lshlrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshl_rev_16>;
defm V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, cashr_rev_16>;
defm V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3P_Profile<VOP_V2I16_V2I16_V2I16>, clshr_rev_16>;
} // End isReMaterializable = 1

let SubtargetPredicate = HasVOP3PInsts in {

// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// The constant will be emitted as a mov, and folded later.
// TODO: We could directly encode the immediate now
def : GCNPat<
  (add (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)), NegSubInlineConstV216:$src1),
  (V_PK_SUB_U16 $src0_modifiers, $src0, SRCMODS.OP_SEL_1, NegSubInlineConstV216:$src1)
>;

// Integer operations with clamp bit set.
// Maps a saturating ISD node onto the corresponding packed instruction
// with the clamp (saturation) bit forced on.
class VOP3PSatPat<SDPatternOperator pat, Instruction inst> : GCNPat<
  (pat (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)),
       (v2i16 (VOP3PMods v2i16:$src1, i32:$src1_modifiers))),
  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE)
>;

def : VOP3PSatPat<uaddsat, V_PK_ADD_U16>;
def : VOP3PSatPat<saddsat, V_PK_ADD_I16>;
def : VOP3PSatPat<usubsat, V_PK_SUB_U16>;
def : VOP3PSatPat<ssubsat, V_PK_SUB_I16>;
} // End SubtargetPredicate = HasVOP3PInsts

// Selection patterns shared by the mad_mix and fma_mix families:
//  1. fpround(fma_like(...))                 -> mixlo, result in low half
//  2. build_vector(elt0, fpround(...))       -> mixhi writing the high half
//  3. same as 2 with AMDGPUclamp on the rounded value -> mixhi with clamp
//  4. AMDGPUclamp over a whole build_vector  -> mixlo feeding mixhi, both
//     halves clamped
multiclass MadFmaMixPats<SDPatternOperator fma_like,
                         Instruction mixlo_inst,
                         Instruction mixhi_inst> {
  def : GCNPat <
    (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                $src2_modifiers, $src2,
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  // FIXME: Special case handling for maxhi (especially for clamp)
  // because dealing with the write to high half of the register is
  // difficult.
  def : GCNPat <
    (build_vector f16:$elt0, (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.NONE,
                       VGPR_32:$elt0))
  >;

  def : GCNPat <
    (build_vector
      f16:$elt0,
      (AMDGPUclamp (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.ENABLE,
                       VGPR_32:$elt0))
  >;

  // Clamp applied to both halves: the mixlo result is fed as the tied
  // input of the mixhi instruction.
  def : GCNPat <
    (AMDGPUclamp (build_vector
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$lo_src0, i32:$lo_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src1, i32:$lo_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src2, i32:$lo_src2_modifiers)))),
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$hi_src0, i32:$hi_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src1, i32:$hi_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src2, i32:$hi_src2_modifiers)))))),
    (v2f16 (mixhi_inst $hi_src0_modifiers, $hi_src0,
                       $hi_src1_modifiers, $hi_src1,
                       $hi_src2_modifiers, $hi_src2,
                       DSTCLAMP.ENABLE,
                       (mixlo_inst $lo_src0_modifiers, $lo_src0,
                                   $lo_src1_modifiers, $lo_src1,
                                   $lo_src2_modifiers, $lo_src2,
                                   DSTCLAMP.ENABLE,
                                   (i32 (IMPLICIT_DEF)))))
  >;
}

let SubtargetPredicate = HasMadMixInsts in {

// These are VOP3a-like opcodes which accept no omod.
// Size of src arguments (16/32) is controlled by op_sel.
// For 16-bit src arguments their location (hi/lo) are controlled by op_sel_hi.
let isCommutable = 1, mayRaiseFPException = 0 in {
let isReMaterializable = 1 in
defm V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
defm V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;

let ClampLo = 0, ClampHi = 1 in {
defm V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
}
} // End FPDPRounding = 1
} // End isCommutable = 1, mayRaiseFPException = 0

defm : MadFmaMixPats<fmad, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
} // End SubtargetPredicate = HasMadMixInsts


// Essentially the same as the mad_mix versions
let SubtargetPredicate = HasFmaMixInsts in {
let isCommutable = 1 in {

let isReMaterializable = 1 in
defm V_FMA_MIX_F32 : VOP3_VOP3PInst<"v_fma_mix_f32", VOP3P_Mix_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
defm V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;

let ClampLo = 0, ClampHi = 1 in {
defm V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_fma_mixhi_f16", VOP3P_Mix_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL, 1>>;
}
} // End FPDPRounding = 1
} // End isCommutable = 1

defm : MadFmaMixPats<fma, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
} // End SubtargetPredicate = HasFmaMixInsts

// Defines patterns that extract signed 4bit from each Idx[0].
// Idx[1] is the left-shift amount that positions the field so that
// (sra ... 28) leaves the sign-extended 4-bit value.
foreach Idx = [[0,28],[4,24],[8,20],[12,16],[16,12],[20,8],[24,4]] in
  def ExtractSigned4bit_#Idx[0] : PatFrag<(ops node:$src),
                                          (sra (shl node:$src, (i32 Idx[1])), (i32 28))>;

// Defines code pattern that extracts U(unsigned/signed) 4/8bit from FromBitIndex.
255class Extract<int FromBitIndex, int BitMask, bit U>: PatFrag< 256 (ops node:$src), 257 !if (!or (!and (!eq (BitMask, 255), !eq (FromBitIndex, 24)), !eq (FromBitIndex, 28)), // last element 258 !if (U, (srl node:$src, (i32 FromBitIndex)), (sra node:$src, (i32 FromBitIndex))), 259 !if (!eq (FromBitIndex, 0), // first element 260 !if (U, (and node:$src, (i32 BitMask)), 261 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src), 262 (sext_inreg node:$src, i8))), 263 !if (U, (and (srl node:$src, (i32 FromBitIndex)), (i32 BitMask)), 264 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src), 265 (sext_inreg (srl node:$src, (i32 FromBitIndex)), i8)))))>; 266 267 268foreach Type = ["I", "U"] in 269 foreach Index = 0-3 in { 270 // Defines patterns that extract each Index'ed 8bit from an unsigned 271 // 32bit scalar value; 272 def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !eq (Type, "U")>; 273 274 // Defines multiplication patterns where the multiplication is happening on each 275 // Index'ed 8bit of a 32bit scalar value. 276 277 def Mul#Type#_Elt#Index : PatFrag< 278 (ops node:$src0, node:$src1), 279 (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), AMDGPUmul_i24_oneuse, AMDGPUmul_u24_oneuse)) 280 (!cast<Extract>(Type#Index#"_8bit") node:$src0), 281 (!cast<Extract>(Type#Index#"_8bit") node:$src1))>; 282 } 283 284// Different variants of dot8 patterns cause a huge increase in the compile time. 285// Define non-associative/commutative add/mul to prevent permutation in the dot8 286// pattern. 
// Plain (non-associative, non-commutative as far as TableGen pattern
// permutation is concerned) versions of add and the AMDGPU 24-bit
// multiplies, plus their one-use wrappers.
def NonACAdd : SDNode<"ISD::ADD" , SDTIntBinOp>;
def NonACAdd_oneuse : HasOneUseBinOp<NonACAdd>;

def NonACAMDGPUmul_u24 : SDNode<"AMDGPUISD::MUL_U24" , SDTIntBinOp>;
def NonACAMDGPUmul_u24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_u24>;

def NonACAMDGPUmul_i24 : SDNode<"AMDGPUISD::MUL_I24" , SDTIntBinOp>;
def NonACAMDGPUmul_i24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_i24>;

foreach Type = ["I", "U"] in
  foreach Index = 0-7 in {
    // Defines patterns that extract each Index'ed 4bit from an unsigned
    // 32bit scalar value.
    def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !eq (Type, "U")>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 4bit of a 32bit scalar value.
    def Mul#Type#Index#"_4bit" : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), NonACAMDGPUmul_i24_oneuse, NonACAMDGPUmul_u24_oneuse))
                             (!cast<Extract>(Type#Index#"_4bit") node:$src0),
                             (!cast<Extract>(Type#Index#"_4bit") node:$src1))>;
  }

// Matches an expanded unsigned 2-element dot product with accumulator
// (hi16*hi16 + src2 + lo16*lo16) and selects the given dot instruction.
class UDot2Pat<Instruction Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
                                         (srl i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
                             (and i32:$src1, (i32 65535)))
   ),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let SubtargetPredicate = !cast<VOP_Pseudo>(Inst).SubtargetPredicate;
}

// Signed counterpart of UDot2Pat (sra / sext_inreg extraction).
class SDot2Pat<Instruction Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
                                         (sra i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
                             (sext_inreg i32:$src1, i16))),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let SubtargetPredicate = !cast<VOP_Pseudo>(Inst).SubtargetPredicate;
}

// Dot-product instructions, grouped by the subtarget feature that
// provides them.
let IsDOT = 1 in {
let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_sdot2, 1>;
defm V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16",
  VOP3P_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_udot2, 1>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot7Insts in {

defm V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
  VOP3P_Profile<VOP_F32_V2F16_V2F16_F32, VOP3_REGULAR, /*HasDPP*/ 1>,
  AMDGPUfdot2, 1/*ExplicitClamp*/>;
defm V_DOT4_U32_U8  : VOP3PInst<"v_dot4_u32_u8",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
defm V_DOT8_U32_U4  : VOP3PInst<"v_dot8_u32_u4",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;

} // End SubtargetPredicate = HasDot7Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8  : VOP3PInst<"v_dot4_i32_i8",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
defm V_DOT8_I32_I4  : VOP3PInst<"v_dot8_i32_i4",
  VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;

} // End SubtargetPredicate = HasDot1Insts

// bf16 dot: the profile additionally enables src1 modifiers.
def DOT2_BF16_Profile
  : VOP3P_Profile<VOP_F32_V2I16_V2I16_F32, VOP3_REGULAR, /*HasDPP*/ 1> {
  let HasSrc1Mods = 1;
}

let SubtargetPredicate = HasDot9Insts in {

defm V_DOT2_F32_BF16 : VOP3PInst<"v_dot2_f32_bf16", DOT2_BF16_Profile,
  int_amdgcn_fdot2_f32_bf16, 1>;

} // End SubtargetPredicate = HasDot9Insts

} // End let IsDOT = 1

multiclass VOP3PDOTIUInst <string OpName, SDPatternOperator intrinsic_node> {
  let IsDOT = 1 in
  defm NAME : VOP3PInst<OpName, VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>,
                        null_frag, 1>;
  // Dot-iu instructions consider input as signed if imod neg bits are set. Thus
  // Dot-iu Intrinsics have extra operands and require separate codegen pattern.
  def : GCNPat < (intrinsic_node (DotIUVOP3PMods i32:$src0_mods), i32:$src0,
                                 (DotIUVOP3PMods i32:$src1_mods), i32:$src1,
                                 i32:$src2, (i1 timm:$clamp)),
                 (!cast<Instruction>(NAME) $src0_mods, i32:$src0,
                                           $src1_mods, i32:$src1,
                                           (i32 8), i32:$src2, i1:$clamp)
  >;
}

let SubtargetPredicate = HasDot8Insts in {
defm V_DOT4_I32_IU8 : VOP3PDOTIUInst<"v_dot4_i32_iu8", int_amdgcn_sudot4>;
defm V_DOT8_I32_IU4 : VOP3PDOTIUInst<"v_dot8_i32_iu4", int_amdgcn_sudot8>;
} // End SubtargetPredicate = HasDot8Insts

def : UDot2Pat<V_DOT2_U32_U16>;
def : SDot2Pat<V_DOT2_I32_I16>;

// Expanded dot4: fold the four per-byte multiplies into a left-nested
// chain of one-use adds accumulating into $src2.
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((i32 i32:$src2), [0, 1, 2, 3], lhs, y,
                      (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// Expanded dot8, using the NonAC nodes so TableGen does not enumerate
// operand permutations of the eight products.
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [1, 2, 3, 4, 5, 6, 7], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// Different variants of dot8 code-gen dag patterns are not generated through table-gen due to a huge increase
// in the compile time. Directly handle the pattern generated by the FE here.
415foreach Type = ["U", "I"] in 416 let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in 417 def : GCNPat < 418 !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)), 419 [7, 1, 2, 3, 4, 5, 6], lhs, y, 420 (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))), 421 (!cast<VOP3P_Pseudo>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>; 422 423def ADst_32 : VOPDstOperand<AGPR_32>; 424def ADst_64 : VOPDstOperand<AReg_64>; 425def ADst_128 : VOPDstOperand<AReg_128>; 426def ADst_256 : VOPDstOperand<AReg_256>; 427def ADst_512 : VOPDstOperand<AReg_512>; 428def ADst_1024 : VOPDstOperand<AReg_1024>; 429def VDst_64 : VOPDstOperand<VReg_64>; 430def VDst_128 : VOPDstOperand<VReg_128>; 431def VDst_256 : VOPDstOperand<VReg_256>; 432def VDst_512 : VOPDstOperand<VReg_512>; 433def VDst_1024 : VOPDstOperand<VReg_1024>; 434 435def VOPProfileAccRead : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> { 436 let Src0RC64 = ARegSrc_32; 437} 438 439def VOPProfileAccWrite : VOP3P_Profile<VOP_I32_I32, VOP3_MAI> { 440 let DstRC = ADst_32; 441 let Src0RC64 = VCSrc_b32; 442} 443 444class VOPProfileMAI<VOPProfile P, RegisterOperand _SrcRC, RegisterOperand _DstRC, 445 RegisterOperand SrcABRC = AVSrc_32> 446 : VOP3P_Profile<P, VOP3_MAI> { 447 let DstRC = _DstRC; 448 let Src0RC64 = SrcABRC; 449 let Src1RC64 = SrcABRC; 450 let Src2RC64 = _SrcRC; 451 let HasOpSel = 0; 452 let HasClamp = 0; 453 let HasIntClamp = 0; 454 let HasOMod = 0; 455 let HasModifiers = 0; 456 let AsmVOP3Base = "$vdst, $src0, $src1, $src2$cbsz$abid$blgp"; 457 let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, cbsz:$cbsz, abid:$abid, blgp:$blgp); 458 let InsVOP3Base = Ins64; 459 // Dst and SrcC cannot partially overlap if SrcC/Dst is bigger than 4 VGPRs. 
460 // We then create two versions of the instruction: with tied dst and src2 461 // and with the earlyclobber flag on the dst. This is stricter than the 462 // actual HW restriction. In particular earlyclobber also affects src0 and 463 // src1 allocation which is not required. 464 bit NoDstOverlap = !gt(DstVT.Size, 128); 465} 466 467class VOPProfileSMFMAC<VOPProfile P, RegisterOperand _DstRC, 468 RegisterOperand _SrcARC, RegisterOperand _SrcBRC> 469 : VOPProfileMAI<P, _DstRC, _DstRC, _SrcARC> { 470 let Src1RC64 = _SrcBRC; 471 let Src2VT = DstVT; 472 let Asm64 = " $vdst, $src0, $src1, $idx$cbsz$abid"; 473 let Outs64 = (outs DstRC:$vdst); 474 let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, VRegSrc_32:$idx, cbsz:$cbsz, abid:$abid, Src2RC64:$src2); 475} 476 477def VOPProfileMAI_F32_F32_X4 : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32, AISrc_128_f32, ADst_128>; 478def VOPProfileMAI_F32_F32_X16 : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32, AISrc_512_f32, ADst_512>; 479def VOPProfileMAI_F32_F32_X32 : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32, AISrc_1024_f32, ADst_1024>; 480def VOPProfileMAI_I32_I32_X4 : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32, AISrc_128_b32, ADst_128>; 481def VOPProfileMAI_I32_I32_X16 : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32, AISrc_512_b32, ADst_512>; 482def VOPProfileMAI_I32_I32_X32 : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32, AISrc_1024_b32, ADst_1024>; 483def VOPProfileMAI_F32_V2I16_X4 : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32, AISrc_128_b32, ADst_128>; 484def VOPProfileMAI_F32_V2I16_X16 : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, AISrc_512_b32, ADst_512>; 485def VOPProfileMAI_F32_V2I16_X32 : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, AISrc_1024_b32, ADst_1024>; 486def VOPProfileMAI_F32_V4F16_X4 : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>; 487def VOPProfileMAI_F32_V4F16_X16 : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>; 488def VOPProfileMAI_F32_V4F16_X32 : 
VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>; 489def VOPProfileMAI_F32_V4I16_X4 : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>; 490def VOPProfileMAI_F32_V4I16_X16 : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>; 491def VOPProfileMAI_F32_V4I16_X32 : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>; 492def VOPProfileMAI_F64_16X16X4F64 : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64, AISrc_256_f64, ADst_256, AVSrc_64>; 493def VOPProfileMAI_F64_4X4X4F64 : VOPProfileMAI<VOP_F64_F64_F64_F64, AISrc_64_f64, ADst_64, AVSrc_64>; 494def VOPProfileMAI_I32_I64_X16 : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32, AISrc_128_b32, ADst_128, AVSrc_64>; 495def VOPProfileMAI_I32_I64_X32 : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32, AISrc_512_b32, ADst_512, AVSrc_64>; 496def VOPProfileMAI_F32_V2F32_X16 : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>; 497def VOPProfileMAI_F32_V2F32_X32 : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>; 498def VOPProfileMAI_F32_I64_X32 : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32, AISrc_128_b32, ADst_128, AVSrc_64>; 499def VOPProfileMAI_F32_I64_X16 : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32, AISrc_512_b32, ADst_512, AVSrc_64>; 500 501def VOPProfileMAI_F32_F32_X4_VCD : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32, VISrc_128_f32, VDst_128>; 502def VOPProfileMAI_F32_F32_X16_VCD : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32, VISrc_512_f32, VDst_512>; 503def VOPProfileMAI_F32_F32_X32_VCD : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32, VISrc_1024_f32, VDst_1024>; 504def VOPProfileMAI_I32_I32_X4_VCD : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32, VISrc_128_b32, VDst_128>; 505def VOPProfileMAI_I32_I32_X16_VCD : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32, VISrc_512_b32, VDst_512>; 506def VOPProfileMAI_I32_I32_X32_VCD : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32, VISrc_1024_b32, VDst_1024>; 507def 
VOPProfileMAI_F32_V2I16_X4_VCD : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32, VISrc_128_b32, VDst_128>; 508def VOPProfileMAI_F32_V2I16_X16_VCD : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, VISrc_512_b32, VDst_512>; 509def VOPProfileMAI_F32_V2I16_X32_VCD : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, VISrc_1024_b32, VDst_1024>; 510def VOPProfileMAI_F32_V4F16_X4_VCD : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>; 511def VOPProfileMAI_F32_V4F16_X16_VCD : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>; 512def VOPProfileMAI_F32_V4F16_X32_VCD : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>; 513def VOPProfileMAI_F32_V4I16_X4_VCD : VOPProfileMAI<VOP_V4F32_V4I16_V4I16_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>; 514def VOPProfileMAI_F32_V4I16_X16_VCD : VOPProfileMAI<VOP_V16F32_V4I16_V4I16_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>; 515def VOPProfileMAI_F32_V4I16_X32_VCD : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F32, VISrc_1024_b32, VDst_1024, AVSrc_64>; 516def VOPProfileMAI_F64_16X16X4F64_VCD : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64, VISrc_256_f64, VDst_256, AVSrc_64>; 517def VOPProfileMAI_F64_4X4X4F64_VCD : VOPProfileMAI<VOP_F64_F64_F64_F64, VISrc_64_f64, VDst_64, AVSrc_64>; 518def VOPProfileMAI_I32_I64_X16_VCD : VOPProfileMAI<VOP_V4I32_I64_I64_V4I32, VISrc_128_b32, VDst_128, AVSrc_64>; 519def VOPProfileMAI_I32_I64_X32_VCD : VOPProfileMAI<VOP_V16I32_I64_I64_V16I32, VISrc_512_b32, VDst_512, AVSrc_64>; 520def VOPProfileMAI_F32_V2F32_X16_VCD : VOPProfileMAI<VOP_V4F32_V2F32_V2F32_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>; 521def VOPProfileMAI_F32_V2F32_X32_VCD : VOPProfileMAI<VOP_V16F32_V2F32_V2F32_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>; 522def VOPProfileMAI_F32_I64_X32_VCD : VOPProfileMAI<VOP_V4F32_I64_I64_V4F32, VISrc_128_b32, VDst_128, AVSrc_64>; 523def VOPProfileMAI_F32_I64_X16_VCD : VOPProfileMAI<VOP_V16F32_I64_I64_V16F32, VISrc_512_b32, VDst_512, AVSrc_64>; 524 525def 
VOPProfileSMFMAC_F32_16X16X32_F16 : VOPProfileSMFMAC<VOP_V4F32_V4F16_V8F16_I32, AVDst_128, AVSrc_64, AVSrc_128>; 526def VOPProfileSMFMAC_F32_32X32X16_F16 : VOPProfileSMFMAC<VOP_V16F32_V4F16_V8F16_I32, AVDst_512, AVSrc_64, AVSrc_128>; 527def VOPProfileSMFMAC_F32_16X16X32_I16 : VOPProfileSMFMAC<VOP_V4F32_V4I16_V8I16_I32, AVDst_128, AVSrc_64, AVSrc_128>; 528def VOPProfileSMFMAC_F32_32X32X16_I16 : VOPProfileSMFMAC<VOP_V16F32_V4I16_V8I16_I32, AVDst_512, AVSrc_64, AVSrc_128>; 529def VOPProfileSMFMAC_I32_16X16X64_I8 : VOPProfileSMFMAC<VOP_V4I32_V2I32_V4I32_I32, AVDst_128, AVSrc_64, AVSrc_128>; 530def VOPProfileSMFMAC_I32_32X32X32_I8 : VOPProfileSMFMAC<VOP_V16I32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>; 531def VOPProfileSMFMAC_F32_16X16X64_F8 : VOPProfileSMFMAC<VOP_V4F32_V2I32_V4I32_I32, AVDst_128, AVSrc_64, AVSrc_128>; 532def VOPProfileSMFMAC_F32_32X32X32_F8 : VOPProfileSMFMAC<VOP_V16F32_V2I32_V4I32_I32, AVDst_512, AVSrc_64, AVSrc_128>; 533 534class MFMATable <bit is_mac, string Name> { 535 bit IsMac = is_mac; 536 string FMAOp = Name; 537} 538 539class MAIFrag<SDPatternOperator Op, code pred> : PatFrag < 540 (ops node:$src0, node:$src1, node:$src2, node:$cbsz, node:$abid, node:$blgp), 541 (Op $src0, $src1, $src2, $cbsz, $abid, $blgp), 542 pred 543>; 544 545let GISelPredicateCode = [{ return MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }] in 546class AgprMAIFrag<SDPatternOperator Op> : 547 MAIFrag<Op, [{ return MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }]>; 548 549let GISelPredicateCode = [{ return !MF.getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }] in 550class VgprMAIFrag<SDPatternOperator Op> : 551 MAIFrag<Op, [{ return !MF->getInfo<SIMachineFunctionInfo>()->mayNeedAGPRs(); }]>; 552 553let Predicates = [HasMAIInsts] in { 554 555let isAsCheapAsAMove = 1, isReMaterializable = 1 in { 556 defm V_ACCVGPR_READ_B32 : VOP3Inst<"v_accvgpr_read_b32", VOPProfileAccRead>; 557 let isMoveImm = 1 in { 558 defm V_ACCVGPR_WRITE_B32 : 
VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite>; 559 } // End isMoveImm = 1 560} // End isAsCheapAsAMove = 1, isReMaterializable = 1 561 562class MAIInst<string OpName, VOPProfile P, SDPatternOperator node> 563 : VOP3InstBase<OpName, P, node> { 564 Instruction Opcode = !cast<Instruction>(NAME); 565 bit is_dgemm = 0; 566 bit is_gfx940_xdl = 0; 567} 568 569multiclass MAIInst<string OpName, string P, SDPatternOperator node, 570 bit NoDstOverlap = !cast<VOPProfileMAI>("VOPProfileMAI_" # P).NoDstOverlap> { 571 let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in { 572 // FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported. 573 let Constraints = !if(NoDstOverlap, "@earlyclobber $vdst", "") in { 574 def _e64 : MAIInst<OpName, !cast<VOPProfileMAI>("VOPProfileMAI_" # P), 575 !if(NoDstOverlap, null_frag, AgprMAIFrag<node>)>, 576 MFMATable<0, NAME # "_e64">; 577 578 let SubtargetPredicate = isGFX90APlus, Mnemonic = OpName in 579 def _vgprcd_e64 : MAIInst<OpName # "_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"), 580 !if(NoDstOverlap, null_frag, VgprMAIFrag<node>)>, 581 MFMATable<0, NAME # "_vgprcd_e64">; 582 } 583 584 foreach _ = BoolToList<NoDstOverlap>.ret in { 585 let Constraints = !if(NoDstOverlap, "$vdst = $src2", ""), 586 isConvertibleToThreeAddress = NoDstOverlap, 587 Mnemonic = OpName in { 588 def "_mac_e64" : MAIInst<OpName # "_mac", !cast<VOPProfileMAI>("VOPProfileMAI_" # P), AgprMAIFrag<node>>, 589 MFMATable<1, NAME # "_e64">; 590 591 let SubtargetPredicate = isGFX90APlus in 592 def _mac_vgprcd_e64 : MAIInst<OpName # "_mac_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD"), 593 VgprMAIFrag<node>>, 594 MFMATable<1, NAME # "_vgprcd_e64">; 595 } 596 } 597 } // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 598} 599 600defm V_MFMA_F32_4X4X1F32 : MAIInst<"v_mfma_f32_4x4x1f32", "F32_F32_X4", int_amdgcn_mfma_f32_4x4x1f32>; 601defm V_MFMA_F32_16X16X1F32 : 
MAIInst<"v_mfma_f32_16x16x1f32", "F32_F32_X16", int_amdgcn_mfma_f32_16x16x1f32>;
defm V_MFMA_F32_16X16X4F32 : MAIInst<"v_mfma_f32_16x16x4f32", "F32_F32_X4", int_amdgcn_mfma_f32_16x16x4f32>;
defm V_MFMA_F32_32X32X1F32 : MAIInst<"v_mfma_f32_32x32x1f32", "F32_F32_X32", int_amdgcn_mfma_f32_32x32x1f32>;
defm V_MFMA_F32_32X32X2F32 : MAIInst<"v_mfma_f32_32x32x2f32", "F32_F32_X16", int_amdgcn_mfma_f32_32x32x2f32>;

// f16 and i8 MFMA variants. is_gfx940_xdl is exported through the
// MAIInstInfoTable GenericTable defined later in this file.
let is_gfx940_xdl = 1 in {
defm V_MFMA_F32_4X4X4F16 : MAIInst<"v_mfma_f32_4x4x4f16", "F32_V4F16_X4", int_amdgcn_mfma_f32_4x4x4f16>;
defm V_MFMA_I32_4X4X4I8 : MAIInst<"v_mfma_i32_4x4x4i8", "I32_I32_X4", int_amdgcn_mfma_i32_4x4x4i8>;
defm V_MFMA_F32_16X16X4F16 : MAIInst<"v_mfma_f32_16x16x4f16", "F32_V4F16_X16", int_amdgcn_mfma_f32_16x16x4f16>;
defm V_MFMA_F32_16X16X16F16 : MAIInst<"v_mfma_f32_16x16x16f16", "F32_V4F16_X4", int_amdgcn_mfma_f32_16x16x16f16>;
defm V_MFMA_I32_16X16X4I8 : MAIInst<"v_mfma_i32_16x16x4i8", "I32_I32_X16", int_amdgcn_mfma_i32_16x16x4i8>;
defm V_MFMA_F32_32X32X4F16 : MAIInst<"v_mfma_f32_32x32x4f16", "F32_V4F16_X32", int_amdgcn_mfma_f32_32x32x4f16>;
defm V_MFMA_F32_32X32X8F16 : MAIInst<"v_mfma_f32_32x32x8f16", "F32_V4F16_X16", int_amdgcn_mfma_f32_32x32x8f16>;
defm V_MFMA_I32_32X32X4I8 : MAIInst<"v_mfma_i32_32x32x4i8", "I32_I32_X32", int_amdgcn_mfma_i32_32x32x4i8>;
}

// Variants guarded by isGFX908orGFX90A (i8 with 32-bit sources, v2i16 bf16).
let Predicates = [isGFX908orGFX90A] in {
defm V_MFMA_I32_16X16X16I8 : MAIInst<"v_mfma_i32_16x16x16i8", "I32_I32_X4", int_amdgcn_mfma_i32_16x16x16i8>;
defm V_MFMA_I32_32X32X8I8 : MAIInst<"v_mfma_i32_32x32x8i8", "I32_I32_X16", int_amdgcn_mfma_i32_32x32x8i8>;
defm V_MFMA_F32_4X4X2BF16 : MAIInst<"v_mfma_f32_4x4x2bf16", "F32_V2I16_X4", int_amdgcn_mfma_f32_4x4x2bf16>;
defm V_MFMA_F32_16X16X2BF16 : MAIInst<"v_mfma_f32_16x16x2bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_16x16x2bf16>;
defm V_MFMA_F32_16X16X8BF16 : MAIInst<"v_mfma_f32_16x16x8bf16", "F32_V2I16_X4", int_amdgcn_mfma_f32_16x16x8bf16>;
defm V_MFMA_F32_32X32X2BF16 : MAIInst<"v_mfma_f32_32x32x2bf16", "F32_V2I16_X32", int_amdgcn_mfma_f32_32x32x2bf16>;
defm V_MFMA_F32_32X32X4BF16 : MAIInst<"v_mfma_f32_32x32x4bf16", "F32_V2I16_X16", int_amdgcn_mfma_f32_32x32x4bf16>;
}

} // End SubtargetPredicate = HasMAIInsts

// gfx90a+ additions: "_1k" bf16 variants (v4i16 sources) and the f64 MFMAs
// (the only is_dgemm-flagged instructions in this file).
let Predicates = [isGFX90APlus] in {
  let is_gfx940_xdl = 1 in {
  defm V_MFMA_F32_32X32X4BF16_1K : MAIInst<"v_mfma_f32_32x32x4bf16_1k", "F32_V4I16_X32", int_amdgcn_mfma_f32_32x32x4bf16_1k>;
  defm V_MFMA_F32_16X16X4BF16_1K : MAIInst<"v_mfma_f32_16x16x4bf16_1k", "F32_V4I16_X16", int_amdgcn_mfma_f32_16x16x4bf16_1k>;
  defm V_MFMA_F32_4X4X4BF16_1K : MAIInst<"v_mfma_f32_4x4x4bf16_1k", "F32_V4I16_X4", int_amdgcn_mfma_f32_4x4x4bf16_1k>;
  defm V_MFMA_F32_32X32X8BF16_1K : MAIInst<"v_mfma_f32_32x32x8bf16_1k", "F32_V4I16_X16", int_amdgcn_mfma_f32_32x32x8bf16_1k>;
  defm V_MFMA_F32_16X16X16BF16_1K : MAIInst<"v_mfma_f32_16x16x16bf16_1k", "F32_V4I16_X4", int_amdgcn_mfma_f32_16x16x16bf16_1k>;
  }

  let is_dgemm = 1 in {
  defm V_MFMA_F64_16X16X4F64 : MAIInst<"v_mfma_f64_16x16x4f64", "F64_16X16X4F64", int_amdgcn_mfma_f64_16x16x4f64>;
  defm V_MFMA_F64_4X4X4F64 : MAIInst<"v_mfma_f64_4x4x4f64", "F64_4X4X4F64", int_amdgcn_mfma_f64_4x4x4f64>;
  }
} // End Predicates = [isGFX90APlus]

// gfx940+ additions: i8 MFMA with 64-bit packed sources, xf32, and fp8/bf8.
let Predicates = [isGFX940Plus], is_gfx940_xdl = 1 in {
  defm V_MFMA_I32_32X32X16I8 : MAIInst<"v_mfma_i32_32x32x16i8", "I32_I64_X32", int_amdgcn_mfma_i32_32x32x16_i8>;
  defm V_MFMA_I32_16X16X32I8 : MAIInst<"v_mfma_i32_16x16x32i8", "I32_I64_X16", int_amdgcn_mfma_i32_16x16x32_i8>;
  defm V_MFMA_F32_16X16X8XF32 : MAIInst<"v_mfma_f32_16x16x8xf32", "F32_V2F32_X16", int_amdgcn_mfma_f32_16x16x8_xf32>;
  defm V_MFMA_F32_32X32X4XF32 : MAIInst<"v_mfma_f32_32x32x4xf32", "F32_V2F32_X32", int_amdgcn_mfma_f32_32x32x4_xf32>;
  defm V_MFMA_F32_16X16X32_BF8_BF8 : MAIInst<"v_mfma_f32_16x16x32_bf8_bf8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_bf8_bf8>;
  defm V_MFMA_F32_16X16X32_BF8_FP8 :
MAIInst<"v_mfma_f32_16x16x32_bf8_fp8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_bf8_fp8>;
  defm V_MFMA_F32_16X16X32_FP8_BF8 : MAIInst<"v_mfma_f32_16x16x32_fp8_bf8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_fp8_bf8>;
  defm V_MFMA_F32_16X16X32_FP8_FP8 : MAIInst<"v_mfma_f32_16x16x32_fp8_fp8", "F32_I64_X32", int_amdgcn_mfma_f32_16x16x32_fp8_fp8>;
  defm V_MFMA_F32_32X32X16_BF8_BF8 : MAIInst<"v_mfma_f32_32x32x16_bf8_bf8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_bf8_bf8>;
  defm V_MFMA_F32_32X32X16_BF8_FP8 : MAIInst<"v_mfma_f32_32x32x16_bf8_fp8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_bf8_fp8>;
  defm V_MFMA_F32_32X32X16_FP8_BF8 : MAIInst<"v_mfma_f32_32x32x16_fp8_bf8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_fp8_bf8>;
  defm V_MFMA_F32_32X32X16_FP8_FP8 : MAIInst<"v_mfma_f32_32x32x16_fp8_fp8", "F32_I64_X16", int_amdgcn_mfma_f32_32x32x16_fp8_fp8>;
} // End Predicates = [isGFX940Plus], is_gfx940_xdl = 1

// Sparse MFMA (SMFMAC) pseudo. The accumulator is tied to the destination
// ($vdst = $src2) and src2 is excluded from the encoding (DisableEncoding);
// the profile is looked up by name from the "VOPProfileSMFMAC_" # P defs.
multiclass SMFMACInst<string OpName, string P, SDPatternOperator node> {
  let Constraints = "$vdst = $src2", DisableEncoding = "$src2",
      isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1, is_gfx940_xdl = 1 in {
    def _e64 : MAIInst<OpName, !cast<VOPProfileSMFMAC>("VOPProfileSMFMAC_" # P), node>;
  }
}

let SubtargetPredicate = isGFX940Plus in {
defm V_SMFMAC_F32_16X16X32_F16 : SMFMACInst<"v_smfmac_f32_16x16x32_f16", "F32_16X16X32_F16", int_amdgcn_smfmac_f32_16x16x32_f16>;
defm V_SMFMAC_F32_32X32X16_F16 : SMFMACInst<"v_smfmac_f32_32x32x16_f16", "F32_32X32X16_F16", int_amdgcn_smfmac_f32_32x32x16_f16>;
defm V_SMFMAC_F32_16X16X32_BF16 : SMFMACInst<"v_smfmac_f32_16x16x32_bf16", "F32_16X16X32_I16", int_amdgcn_smfmac_f32_16x16x32_bf16>;
defm V_SMFMAC_F32_32X32X16_BF16 : SMFMACInst<"v_smfmac_f32_32x32x16_bf16", "F32_32X32X16_I16", int_amdgcn_smfmac_f32_32x32x16_bf16>;
defm V_SMFMAC_I32_16X16X64_I8 : SMFMACInst<"v_smfmac_i32_16x16x64_i8", "I32_16X16X64_I8", int_amdgcn_smfmac_i32_16x16x64_i8>;
defm V_SMFMAC_I32_32X32X32_I8 : SMFMACInst<"v_smfmac_i32_32x32x32_i8", "I32_32X32X32_I8", int_amdgcn_smfmac_i32_32x32x32_i8>;
defm V_SMFMAC_F32_16X16X64_BF8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_bf8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_bf8_bf8>;
defm V_SMFMAC_F32_16X16X64_BF8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_bf8_fp8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_bf8_fp8>;
defm V_SMFMAC_F32_16X16X64_FP8_BF8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_bf8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_fp8_bf8>;
defm V_SMFMAC_F32_16X16X64_FP8_FP8 : SMFMACInst<"v_smfmac_f32_16x16x64_fp8_fp8", "F32_16X16X64_F8", int_amdgcn_smfmac_f32_16x16x64_fp8_fp8>;
defm V_SMFMAC_F32_32X32X32_BF8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_bf8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_bf8_bf8>;
defm V_SMFMAC_F32_32X32X32_BF8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_bf8_fp8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_bf8_fp8>;
defm V_SMFMAC_F32_32X32X32_FP8_BF8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_bf8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_fp8_bf8>;
defm V_SMFMAC_F32_32X32X32_FP8_FP8 : SMFMACInst<"v_smfmac_f32_32x32x32_fp8_fp8", "F32_32X32X32_F8", int_amdgcn_smfmac_f32_32x32x32_fp8_fp8>;
}

// Searchable table of all MAIInst records, keyed by opcode; exposes the
// is_dgemm and is_gfx940_xdl flags set on the defs above.
def MAIInstInfoTable : GenericTable {
  let FilterClass = "MAIInst";
  let CppTypeName = "MAIInstInfo";
  let Fields = [
    "Opcode", "is_dgemm", "is_gfx940_xdl"
  ];

  let PrimaryKey = ["Opcode"];
  let PrimaryKeyName = "getMAIInstInfoHelper";
}

// Packed single-precision VOP3P pseudos.
let SubtargetPredicate = HasPackedFP32Ops, isCommutable = 1, isReMaterializable = 1 in {
  defm V_PK_FMA_F32 : VOP3PInst<"v_pk_fma_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fma>;
  defm V_PK_MUL_F32 : VOP3PInst<"v_pk_mul_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fmul>;
  defm V_PK_ADD_F32 : VOP3PInst<"v_pk_add_f32", VOP3P_Profile<VOP_V2F32_V2F32_V2F32, VOP3_PACKED>, any_fadd>;
698 defm V_PK_MOV_B32 : VOP3PInst<"v_pk_mov_b32", VOP3P_Profile<VOP_V2I32_V2I32_V2I32, VOP3_PACKED>>; 699} // End SubtargetPredicate = HasPackedFP32Ops, isCommutable = 1 700 701def : MnemonicAlias<"v_accvgpr_read", "v_accvgpr_read_b32">; 702def : MnemonicAlias<"v_accvgpr_write", "v_accvgpr_write_b32">; 703 704class VOPProfileWMMA<VOPProfile P, string Suffix, RegisterOperand _Src01RC64, bit _HasClamp, bit _HasOpSel> : VOP3P_Profile<P> { 705 let DstRC = !if(!eq(Suffix, "_w32"), VDst_256, VDst_128); 706 let Src0RC64 = _Src01RC64; 707 let Src1RC64 = _Src01RC64; 708 let Src2RC64 = !if(!eq(Suffix, "_w32"), VISrc_256_f64, VISrc_128_f32); 709 let HasClamp = _HasClamp; 710 let HasOpSel = _HasOpSel; 711 let IsPacked = 1; 712 let IsWMMA = 1; 713} 714 715def VOP_V8F32_V16F16_V16F16_V8F32 : VOPProfile <[v8f32, v16f16, v16f16, v8f32]>; 716def VOP_V8F32_V16I16_V16I16_V8F32 : VOPProfile <[v8f32, v16i16, v16i16, v8f32]>; 717def VOP_V16F16_V16F16_V16F16_V16F16 : VOPProfile <[v16f16, v16f16, v16f16, v16f16]>; 718def VOP_V16I16_V16I16_V16I16_V16I16 : VOPProfile <[v16i16, v16i16, v16i16, v16i16]>; 719def VOP_V8I32_V4I32_V4I32_V8I32 : VOPProfile <[v8i32, v4i32, v4i32, v8i32]>; 720def VOP_V8I32_V2I32_V2I32_V8I32 : VOPProfile <[v8i32, v2i32, v2i32, v8i32]>; 721 722def VOP_V4F32_V16F16_V16F16_V4F32 : VOPProfile <[v4f32, v16f16, v16f16, v4f32]>; 723def VOP_V4F32_V16I16_V16I16_V4F32 : VOPProfile <[v4f32, v16i16, v16i16, v4f32]>; 724def VOP_V8F16_V16F16_V16F16_V8F16 : VOPProfile <[v8f16, v16f16, v16f16, v8f16]>; 725def VOP_V8I16_V16I16_V16I16_V8I16 : VOPProfile <[v8i16, v16i16, v16i16, v8i16]>; 726def VOP_V4I32_V4I32_V4I32_V4I32 : VOPProfile <[v4i32, v4i32, v4i32, v4i32]>; 727def VOP_V4I32_V2I32_V2I32_V4I32 : VOPProfile <[v4i32, v2i32, v2i32, v4i32]>; 728 729 730class WMMAType <bits<2> val> { 731 bit hasClamp = val{0}; 732 bit hasOpsel = val{1}; 733} 734 735def WMMARegular : WMMAType<0b00>; 736def WMMAUIClamp : WMMAType<0b01>; 737def WMMAOpSel : WMMAType<0b10>; 738 739class 
WMMARegularPat<Instruction Inst, SDPatternOperator node, VOPProfile P> : 740 GCNPat < (P.DstVT (node 741 (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)), 742 (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)), 743 (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers)) 744 )), 745 (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, $src2_modifiers, P.Src2VT:$src2)) 746>; 747 748class WMMAOpSelPat<Instruction Inst, SDPatternOperator node, VOPProfile P> : 749 GCNPat < (P.DstVT (node 750 (P.Src0VT P.Src0VT:$src0), 751 (P.Src1VT P.Src1VT:$src1), 752 (P.Src2VT P.Src2VT:$src2), (WMMAOpSelVOP3PMods i32:$src2_modifiers) 753 )), 754 (P.DstVT (Inst (i32 8), P.Src0VT:$src0, (i32 8), P.Src1VT:$src1, i32:$src2_modifiers, P.Src2VT:$src2)) 755>; 756 757class WMMAUIClampPat<Instruction Inst, SDPatternOperator node, VOPProfile P> : 758 GCNPat < (P.DstVT (node 759 (DotIUVOP3PMods i32:$src0_modifiers), (P.Src0VT P.Src0VT:$src0), 760 (DotIUVOP3PMods i32:$src1_modifiers), (P.Src1VT P.Src1VT:$src1), 761 (P.Src2VT P.Src2VT:$src2), (i1 timm:$clamp) 762 )), 763 (P.DstVT (Inst i32:$src0_modifiers, P.Src0VT:$src0, i32:$src1_modifiers, P.Src1VT:$src1, (i32 8), P.Src2VT:$src2, i1:$clamp)) 764>; 765 766class WMMAOpcodeMapping<Instruction TwoAddr, Instruction ThreeAddr> { 767 Instruction Opcode2Addr = TwoAddr; 768 Instruction Opcode3Addr = ThreeAddr; 769 Predicate WaveSizePredicate; 770} 771 772def WMMAOpcode : GenericEnum { 773 let FilterClass = "VOP3P_Pseudo"; 774} 775 776class WMMAMappingTable : GenericTable { 777 let FilterClass = "WMMAOpcodeMapping"; 778 let CppTypeName = "WMMAOpcodeMappingInfo"; 779 let Fields = ["Opcode2Addr", "Opcode3Addr"]; 780 string TypeOf_Opcode2Addr = "WMMAOpcode"; 781 string TypeOf_Opcode3Addr = "WMMAOpcode"; 782} 783 784def WMMAOpcode2AddrMappingTable : WMMAMappingTable { 785 let PrimaryKey = ["Opcode2Addr"]; 786 let PrimaryKeyName = "getWMMAMappingInfoFrom2AddrOpcode"; 787} 788 789def 
WMMAOpcode3AddrMappingTable : WMMAMappingTable {
  let PrimaryKey = ["Opcode3Addr"];
  let PrimaryKeyName = "getWMMAMappingInfoFrom3AddrOpcode";
}

// The WMMA instruction has extra constraints:
// Matrices A and B cannot overlap with D. C cannot partially overlap with D,
// but it is OK for them to be the same (which is a typical case).
//
// We implement it as follows:
// 1) Map the intrinsic to the pseudo where D is tied to C ($vdst = $src2).
// 2) The pass twoaddressinstruction checks if src2 is live and if that is the case
//    it converts the default pseudo to the pseudo where src2 is not the same as vdst.
// 3) @earlyclobber on the destination satisfies the constraint during RA.

multiclass WMMAInst<string Suffix, string Instr, VOPProfile P, SDPatternOperator node = null_frag, RegisterOperand _Src01RC64 = VRegSrc_256, WMMAType Type> {

  defvar WMMAConstraints2Addr = "@earlyclobber $vdst,$vdst = $src2";
  defvar WMMAConstraints3Addr = "@earlyclobber $vdst";

  defvar WMMAProfile = VOPProfileWMMA<P, Suffix, _Src01RC64, Type.hasClamp, Type.hasOpsel>;
  // Emit both the tied and untied pseudo for the requested wave size, plus a
  // WMMAOpcodeMapping record connecting the two.
  if !eq(Suffix, "_w32") then {
    let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
      let Constraints = WMMAConstraints2Addr, isConvertibleToThreeAddress = 1 in {
        def _twoaddr_w32 : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
      }
      let Constraints = WMMAConstraints3Addr, SchedRW = [Write32Bit, Write32Bit] in {
        def _threeaddr_w32 : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
      }
    }
    def : WMMAOpcodeMapping<!cast<Instruction>(NAME # _twoaddr_w32),
                            !cast<Instruction>(NAME # _threeaddr_w32)>;
  } else if !eq(Suffix, "_w64") then {
    let Mnemonic = Instr, mayRaiseFPException = 0, ReadsModeReg = 0 in {
      let Constraints = WMMAConstraints2Addr, isConvertibleToThreeAddress = 1 in {
        def _twoaddr_w64 : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
      }
      let Constraints = WMMAConstraints3Addr, SchedRW = [Write32Bit, Write32Bit] in {
        def _threeaddr_w64 : VOP3P_Pseudo<Instr # Suffix, WMMAProfile>;
      }
    }
    def : WMMAOpcodeMapping<!cast<Instruction>(NAME # _twoaddr_w64),
                            !cast<Instruction>(NAME # _threeaddr_w64)>;
  }

  // Only the tied form gets an ISel pattern, per the scheme described above.
  if !eq(Type, WMMAOpSel) then {
    def : WMMAOpSelPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
  } else if !eq(Type, WMMAUIClamp) then {
    def : WMMAUIClampPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
  } else {
    def : WMMARegularPat<!cast<Instruction>(NAME # _twoaddr # Suffix), node, P>;
  }
}


let WaveSizePredicate = isWave32 in {
  defm V_WMMA_F32_16X16X16_F16 : WMMAInst<"_w32", "v_wmma_f32_16x16x16_f16", VOP_V8F32_V16F16_V16F16_V8F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular>;
  defm V_WMMA_F32_16X16X16_BF16 : WMMAInst<"_w32", "v_wmma_f32_16x16x16_bf16", VOP_V8F32_V16I16_V16I16_V8F32, int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular>;
  defm V_WMMA_F16_16X16X16_F16 : WMMAInst<"_w32", "v_wmma_f16_16x16x16_f16", VOP_V16F16_V16F16_V16F16_V16F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel>;
  defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w32", "v_wmma_bf16_16x16x16_bf16", VOP_V16I16_V16I16_V16I16_V16I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel>;
  defm V_WMMA_I32_16X16X16_IU8 : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu8", VOP_V8I32_V4I32_V4I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp>;
  defm V_WMMA_I32_16X16X16_IU4 : WMMAInst<"_w32", "v_wmma_i32_16x16x16_iu4", VOP_V8I32_V2I32_V2I32_V8I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64, WMMAUIClamp>;
}

let WaveSizePredicate = isWave64 in {
  defm V_WMMA_F32_16X16X16_F16 : WMMAInst<"_w64", "v_wmma_f32_16x16x16_f16", VOP_V4F32_V16F16_V16F16_V4F32, int_amdgcn_wmma_f32_16x16x16_f16, VRegSrc_256, WMMARegular>;
  defm V_WMMA_F32_16X16X16_BF16 : WMMAInst<"_w64", "v_wmma_f32_16x16x16_bf16", VOP_V4F32_V16I16_V16I16_V4F32,
int_amdgcn_wmma_f32_16x16x16_bf16, VRegSrc_256, WMMARegular>; 856 defm V_WMMA_F16_16X16X16_F16 : WMMAInst<"_w64", "v_wmma_f16_16x16x16_f16", VOP_V8F16_V16F16_V16F16_V8F16, int_amdgcn_wmma_f16_16x16x16_f16, VRegSrc_256, WMMAOpSel>; 857 defm V_WMMA_BF16_16X16X16_BF16 : WMMAInst<"_w64", "v_wmma_bf16_16x16x16_bf16", VOP_V8I16_V16I16_V16I16_V8I16, int_amdgcn_wmma_bf16_16x16x16_bf16, VRegSrc_256, WMMAOpSel>; 858 defm V_WMMA_I32_16X16X16_IU8 : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu8", VOP_V4I32_V4I32_V4I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu8, VRegSrc_128, WMMAUIClamp>; 859 defm V_WMMA_I32_16X16X16_IU4 : WMMAInst<"_w64", "v_wmma_i32_16x16x16_iu4", VOP_V4I32_V2I32_V2I32_V4I32, int_amdgcn_wmma_i32_16x16x16_iu4, VRegSrc_64, WMMAUIClamp>; 860 861} 862 863//===----------------------------------------------------------------------===// 864// Begin Real Encodings 865//===----------------------------------------------------------------------===// 866 867class VOP3P_DPP16<bits<7> op, VOP_DPP_Pseudo ps, int subtarget, 868 string opName = ps.OpName> 869 : VOP3P_DPP<op, opName, ps.Pfl, 1>, SIMCInstr<ps.PseudoInstr, subtarget> { 870 let hasSideEffects = ps.hasSideEffects; 871 let Defs = ps.Defs; 872 let SchedRW = ps.SchedRW; 873 let Uses = ps.Uses; 874 let AssemblerPredicate = HasDPP16; 875 let SubtargetPredicate = HasDPP16; 876 let OtherPredicates = ps.OtherPredicates; 877} 878 879class VOP3P_DPP8_Base<bits<7> op, VOP_Pseudo ps, string opName = ps.OpName> 880 : VOP3P_DPP8<op, opName, ps.Pfl> { 881 let hasSideEffects = ps.hasSideEffects; 882 let Defs = ps.Defs; 883 let SchedRW = ps.SchedRW; 884 let Uses = ps.Uses; 885 let OtherPredicates = ps.OtherPredicates; 886} 887 888//===----------------------------------------------------------------------===// 889// GFX11. 
890//===----------------------------------------------------------------------===// 891 892let AssemblerPredicate = isGFX11Plus, 893 DecoderNamespace = "GFX11" in { 894 895 multiclass VOP3P_Real_gfx11<bits<7> op, string backing_ps_name = NAME, 896 string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> { 897 def _gfx11 : VOP3P_Real<!cast<VOP3P_Pseudo>(backing_ps_name), 898 SIEncodingFamily.GFX11, asmName>, 899 VOP3Pe_gfx11<op, !cast<VOP3P_Pseudo>(backing_ps_name).Pfl>; 900 } 901 902 multiclass VOP3P_Real_dpp_gfx11<bits<7> op, string backing_ps_name = NAME, 903 string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> { 904 defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name); 905 def _dpp_gfx11 906 : VOP3P_DPP16<op, !cast<VOP_DPP_Pseudo>(backing_ps_name #"_dpp"), 907 SIEncodingFamily.GFX11> { 908 let AsmString = asmName #ps.Pfl.AsmVOP3DPP16; 909 let DecoderNamespace = "DPPGFX11"; 910 } 911 } 912 913 multiclass VOP3P_Real_dpp8_gfx11<bits<7> op, string backing_ps_name = NAME, 914 string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> { 915 defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name); 916 def _dpp8_gfx11 : VOP3P_DPP8_Base<op, ps> { 917 let AsmString = asmName #ps.Pfl.AsmVOP3DPP8; 918 let DecoderNamespace = "DPP8GFX11"; 919 } 920 } 921 922 multiclass VOP3P_Realtriple_gfx11<bits<7> op, string backing_ps_name = NAME, 923 string asmName = !cast<VOP3P_Pseudo>(NAME).Mnemonic> 924 : VOP3P_Real_gfx11<op, backing_ps_name, asmName>, 925 VOP3P_Real_dpp_gfx11<op, backing_ps_name, asmName>, 926 VOP3P_Real_dpp8_gfx11<op, backing_ps_name, asmName>; 927} // End AssemblerPredicate = isGFX11Plus, DecoderNamespace = "GFX11" 928 929defm V_DOT4_I32_IU8 : VOP3P_Real_gfx11 <0x16>; 930defm V_DOT8_I32_IU4 : VOP3P_Real_gfx11 <0x18>; 931defm V_DOT2_F32_BF16 : VOP3P_Real_gfx11 <0x1a>; 932 933multiclass VOP3P_Real_WMMA <bits<7> op> { 934 let WaveSizePredicate = isWave32, DecoderNamespace = "GFX11" in { 935 defm _twoaddr_w32 : VOP3P_Real_gfx11 <op>; 936 } 937 let WaveSizePredicate = isWave64, 
DecoderNamespace = "WMMAGFX11" in {
    defm _twoaddr_w64 : VOP3P_Real_gfx11 <op>;
  }
}

defm V_WMMA_F32_16X16X16_F16 : VOP3P_Real_WMMA <0x040>;
defm V_WMMA_F32_16X16X16_BF16 : VOP3P_Real_WMMA <0x041>;
defm V_WMMA_F16_16X16X16_F16 : VOP3P_Real_WMMA <0x042>;
defm V_WMMA_BF16_16X16X16_BF16 : VOP3P_Real_WMMA <0x043>;
defm V_WMMA_I32_16X16X16_IU8 : VOP3P_Real_WMMA <0x044>;
defm V_WMMA_I32_16X16X16_IU4 : VOP3P_Real_WMMA <0x045>;

//===----------------------------------------------------------------------===//
// GFX8 (VI)
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_vi<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicate = HasVOP3PInsts;
    let DecoderNamespace = "GFX8";
    let VOP3P = 1;
  }
}

// MAI real with the accumulate-bit (third VOP3Pe_MAI argument) left
// unspecified; op_sel_hi bits are also don't-care in this encoding.
multiclass VOP3P_Real_MAI<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> {
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
    let Inst{14} = ?; // op_sel_hi(2)
    let Inst{59} = ?; // op_sel_hi(0)
    let Inst{60} = ?; // op_sel_hi(1)
  }
}

// gfx90a MFMA reals: one encoding with AGPR source/destination (acd, acc
// bit = 1) and one with the VGPR-cd pseudo (vcd, acc bit = 0).
let Constraints = "" in {
multiclass VOP3P_Real_MFMA_gfx90a<bits<7> op> {
  let SubtargetPredicate = isGFX90AOnly,
      AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A" in {
  def _gfx90a_acd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX90A>,
                    VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, 1>;

  def _gfx90a_vcd : VOP3P_Real<!cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64"), SIEncodingFamily.GFX90A>,
                    VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64").Pfl, 0>;
  } // End AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A"
}
}

// Assembler aliases accepting NameTo as an alternative spelling for both the
// acd and vcd gfx940 encodings; only emitted when the names actually differ.
multiclass VOP3P_Real_MFMA_gfx940_aliases<string NameFrom, string NameTo, string Op,
                                          VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(Op # "_e64"),
                                          VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(Op # "_vgprcd" # "_e64"),
                                          VOPProfile Pfl_ACD = PS_ACD.Pfl,
                                          VOPProfile Pfl_VCD = PS_VCD.Pfl> {
  let Predicates = [isGFX940Plus] in {
    foreach _ = BoolToList<!ne(NameFrom, NameTo)>.ret in {
      def : InstAlias <NameTo # " " # PS_ACD.AsmOperands,
                       (!cast<VOP3P_Real>(Op # "_gfx940_acd") Pfl_ACD.DstRC:$vdst,
                        Pfl_ACD.Src0RC64:$src0, Pfl_ACD.Src1RC64:$src1, Pfl_ACD.Src2RC64:$src2,
                        cbsz:$cbsz, abid:$abid, blgp:$blgp)>, PredicateControl;
      def : InstAlias <NameTo # " " # PS_VCD.AsmOperands,
                       (!cast<VOP3P_Real>(Op # "_gfx940_vcd") Pfl_VCD.DstRC:$vdst,
                        Pfl_VCD.Src0RC64:$src0, Pfl_VCD.Src1RC64:$src1, Pfl_VCD.Src2RC64:$src2,
                        cbsz:$cbsz, abid:$abid, blgp:$blgp)>, PredicateControl;
    }
  } // End Predicates = [isGFX940Plus]
}

// gfx940 MFMA reals (acd + vcd, like gfx90a) assembled under Name, plus
// aliases for the pseudo's own mnemonic and, for "_1k" variants, for the
// name with "_1k" stripped.
multiclass VOP3P_Real_MFMA_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
                                  VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
                                  VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
  let SubtargetPredicate = isGFX940Plus,
      AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940",
      AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
  def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
                    VOP3Pe_MAI <op, PS_ACD.Pfl, 1>;

  def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
                    VOP3Pe_MAI <op, PS_VCD.Pfl, 0>;
  } // End AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX940"

  defm : VOP3P_Real_MFMA_gfx940_aliases<Name, PS_ACD.Mnemonic, NAME>;

  foreach _ = BoolToList<!ne(!subst("_1k", "", PS_ACD.Mnemonic), PS_ACD.Mnemonic)>.ret in
    defm : VOP3P_Real_MFMA_gfx940_aliases<Name, !subst("_1k", "", PS_ACD.Mnemonic), NAME>;
}

// Combined MFMA real: gfx90a + gfx940 encodings plus the VI/gfx908 one.
multiclass VOP3P_Real_MFMA<bits<7> op, string GFX940Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic>
: 1025 VOP3P_Real_MFMA_gfx90a <op>, 1026 VOP3P_Real_MFMA_gfx940 <op, GFX940Name> { 1027 def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>, 1028 VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl, ?> { 1029 let AssemblerPredicate = HasMAIInsts; 1030 let DecoderNamespace = "GFX8"; 1031 let Constraints = ""; 1032 } 1033} 1034 1035multiclass VOP3P_Real_SMFMAC<bits<7> op, string alias> { 1036 def _gfx940 : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>, 1037 VOP3Pe_SMFMAC <op> { 1038 let AssemblerPredicate = isGFX940Plus; 1039 let DecoderNamespace = "GFX8"; 1040 } 1041 def : MnemonicAlias<alias, !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic>; 1042} 1043 1044defm V_PK_MAD_I16 : VOP3P_Real_vi <0x00>; 1045defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x01>; 1046defm V_PK_ADD_I16 : VOP3P_Real_vi <0x02>; 1047defm V_PK_SUB_I16 : VOP3P_Real_vi <0x03>; 1048defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x04>; 1049defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x05>; 1050defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x06>; 1051defm V_PK_MAX_I16 : VOP3P_Real_vi <0x07>; 1052defm V_PK_MIN_I16 : VOP3P_Real_vi <0x08>; 1053defm V_PK_MAD_U16 : VOP3P_Real_vi <0x09>; 1054 1055defm V_PK_ADD_U16 : VOP3P_Real_vi <0x0a>; 1056defm V_PK_SUB_U16 : VOP3P_Real_vi <0x0b>; 1057defm V_PK_MAX_U16 : VOP3P_Real_vi <0x0c>; 1058defm V_PK_MIN_U16 : VOP3P_Real_vi <0x0d>; 1059defm V_PK_FMA_F16 : VOP3P_Real_vi <0x0e>; 1060defm V_PK_ADD_F16 : VOP3P_Real_vi <0x0f>; 1061defm V_PK_MUL_F16 : VOP3P_Real_vi <0x10>; 1062defm V_PK_MIN_F16 : VOP3P_Real_vi <0x11>; 1063defm V_PK_MAX_F16 : VOP3P_Real_vi <0x12>; 1064 1065 1066let SubtargetPredicate = HasMadMixInsts in { 1067defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x20>; 1068defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x21>; 1069defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x22>; 1070} 1071 1072let SubtargetPredicate = HasFmaMixInsts in { 1073let DecoderNamespace = "GFX9_DL" in { 1074// The mad_mix instructions were renamed and their behaviors changed, 1075// but the opcode stayed 
the same so we need to put these in a
// different DecoderNamespace to avoid the ambiguity.
defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x20>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x21>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x22>;
}
}


let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x26>;
defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x27>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot7Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x23>;
defm V_DOT4_U32_U8 : VOP3P_Real_vi <0x29>;
defm V_DOT8_U32_U4 : VOP3P_Real_vi <0x2b>;

} // End SubtargetPredicate = HasDot7Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8 : VOP3P_Real_vi <0x28>;
defm V_DOT8_I32_I4 : VOP3P_Real_vi <0x2a>;

} // End SubtargetPredicate = HasDot1Insts

// MFMA reals. The string argument is the gfx940 assembler name for the
// instruction (see VOP3P_Real_MFMA / VOP3P_Real_MFMA_gfx940 above).
let SubtargetPredicate = HasMAIInsts in {

defm V_ACCVGPR_READ_B32 : VOP3P_Real_MAI <0x58>;
defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x59>;
defm V_MFMA_F32_32X32X1F32 : VOP3P_Real_MFMA <0x40, "v_mfma_f32_32x32x1_2b_f32">;
defm V_MFMA_F32_16X16X1F32 : VOP3P_Real_MFMA <0x41, "v_mfma_f32_16x16x1_4b_f32">;
defm V_MFMA_F32_4X4X1F32 : VOP3P_Real_MFMA <0x42, "v_mfma_f32_4x4x1_16b_f32">;
defm V_MFMA_F32_32X32X2F32 : VOP3P_Real_MFMA <0x44, "v_mfma_f32_32x32x2_f32">;
defm V_MFMA_F32_16X16X4F32 : VOP3P_Real_MFMA <0x45, "v_mfma_f32_16x16x4_f32">;
defm V_MFMA_F32_32X32X4F16 : VOP3P_Real_MFMA <0x48, "v_mfma_f32_32x32x4_2b_f16">;
defm V_MFMA_F32_16X16X4F16 : VOP3P_Real_MFMA <0x49, "v_mfma_f32_16x16x4_4b_f16">;
defm V_MFMA_F32_4X4X4F16 : VOP3P_Real_MFMA <0x4a, "v_mfma_f32_4x4x4_16b_f16">;
defm V_MFMA_F32_32X32X8F16 : VOP3P_Real_MFMA <0x4c, "v_mfma_f32_32x32x8_f16">;
defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MFMA <0x4d, "v_mfma_f32_16x16x16_f16">;
defm V_MFMA_I32_32X32X4I8 : VOP3P_Real_MFMA <0x50, "v_mfma_i32_32x32x4_2b_i8">;
defm V_MFMA_I32_16X16X4I8 : VOP3P_Real_MFMA <0x51, "v_mfma_i32_16x16x4_4b_i8">;
defm V_MFMA_I32_4X4X4I8 : VOP3P_Real_MFMA <0x52, "v_mfma_i32_4x4x4_16b_i8">;

let SubtargetPredicate = isGFX908orGFX90A in {
defm V_MFMA_I32_16X16X16I8 : VOP3P_Real_MFMA <0x55>;
defm V_MFMA_I32_32X32X8I8 : VOP3P_Real_MFMA <0x54>;
defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MFMA <0x68>;
defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MFMA <0x69>;
defm V_MFMA_F32_4X4X2BF16 : VOP3P_Real_MFMA <0x6b>;
defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MFMA <0x6c>;
defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MFMA <0x6d>;
}

} // End SubtargetPredicate = HasMAIInsts

defm V_MFMA_F32_32X32X4BF16_1K : VOP3P_Real_MFMA_gfx90a <0x63>;
defm V_MFMA_F32_16X16X4BF16_1K : VOP3P_Real_MFMA_gfx90a <0x64>;
defm V_MFMA_F32_4X4X4BF16_1K : VOP3P_Real_MFMA_gfx90a <0x65>;
defm V_MFMA_F32_32X32X8BF16_1K : VOP3P_Real_MFMA_gfx90a <0x66>;
defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx90a <0x67>;
defm V_MFMA_F64_16X16X4F64 : VOP3P_Real_MFMA_gfx90a <0x6e>;
defm V_MFMA_F64_4X4X4F64 : VOP3P_Real_MFMA_gfx90a <0x6f>;

defm V_MFMA_I32_32X32X16I8 : VOP3P_Real_MFMA_gfx940 <0x56, "v_mfma_i32_32x32x16_i8">;
defm V_MFMA_I32_16X16X32I8 : VOP3P_Real_MFMA_gfx940 <0x57, "v_mfma_i32_16x16x32_i8">;
defm V_MFMA_F32_16X16X8XF32 : VOP3P_Real_MFMA_gfx940 <0x3e, "v_mfma_f32_16x16x8_xf32">;
defm V_MFMA_F32_32X32X4XF32 : VOP3P_Real_MFMA_gfx940 <0x3f, "v_mfma_f32_32x32x4_xf32">;
defm V_MFMA_F32_16X16X32_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x70>;
defm V_MFMA_F32_16X16X32_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x71>;
defm V_MFMA_F32_16X16X32_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x72>;
defm V_MFMA_F32_16X16X32_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x73>;
defm V_MFMA_F32_32X32X16_BF8_BF8 : VOP3P_Real_MFMA_gfx940 <0x74>;
defm V_MFMA_F32_32X32X16_BF8_FP8 : VOP3P_Real_MFMA_gfx940 <0x75>;
defm
V_MFMA_F32_32X32X16_FP8_BF8 : VOP3P_Real_MFMA_gfx940 <0x76>;
defm V_MFMA_F32_32X32X16_FP8_FP8 : VOP3P_Real_MFMA_gfx940 <0x77>;

defm V_MFMA_F32_32X32X4BF16_1K : VOP3P_Real_MFMA_gfx940 <0x5d, "v_mfma_f32_32x32x4_2b_bf16">;
defm V_MFMA_F32_16X16X4BF16_1K : VOP3P_Real_MFMA_gfx940 <0x5e, "v_mfma_f32_16x16x4_4b_bf16">;
defm V_MFMA_F32_4X4X4BF16_1K : VOP3P_Real_MFMA_gfx940 <0x5f, "v_mfma_f32_4x4x4_16b_bf16">;
defm V_MFMA_F32_32X32X8BF16_1K : VOP3P_Real_MFMA_gfx940 <0x60, "v_mfma_f32_32x32x8_bf16">;
defm V_MFMA_F32_16X16X16BF16_1K : VOP3P_Real_MFMA_gfx940 <0x61, "v_mfma_f32_16x16x16_bf16">;

defm V_MFMA_F64_16X16X4F64 : VOP3P_Real_MFMA_gfx940 <0x6e, "v_mfma_f64_16x16x4_f64">;
defm V_MFMA_F64_4X4X4F64 : VOP3P_Real_MFMA_gfx940 <0x6f, "v_mfma_f64_4x4x4_4b_f64">;

// SMFMAC reals. The second argument is a separator-less alternative spelling
// accepted by the assembler through the MnemonicAlias emitted in
// VOP3P_Real_SMFMAC above.
defm V_SMFMAC_F32_16X16X32_F16 : VOP3P_Real_SMFMAC <0x62, "v_smfmac_f32_16x16x32f16">;
defm V_SMFMAC_F32_32X32X16_F16 : VOP3P_Real_SMFMAC <0x64, "v_smfmac_f32_32x32x16f16">;
defm V_SMFMAC_F32_16X16X32_BF16 : VOP3P_Real_SMFMAC <0x66, "v_smfmac_f32_16x16x32bf16">;
defm V_SMFMAC_F32_32X32X16_BF16 : VOP3P_Real_SMFMAC <0x68, "v_smfmac_f32_32x32x16bf16">;
defm V_SMFMAC_I32_16X16X64_I8 : VOP3P_Real_SMFMAC <0x6a, "v_smfmac_i32_16x16x64i8">;
defm V_SMFMAC_I32_32X32X32_I8 : VOP3P_Real_SMFMAC <0x6c, "v_smfmac_i32_32x32x32i8">;
defm V_SMFMAC_F32_16X16X64_BF8_BF8 : VOP3P_Real_SMFMAC <0x78, "v_smfmac_f32_16x16x64bf8bf8">;
defm V_SMFMAC_F32_16X16X64_BF8_FP8 : VOP3P_Real_SMFMAC <0x79, "v_smfmac_f32_16x16x64bf8fp8">;
defm V_SMFMAC_F32_16X16X64_FP8_BF8 : VOP3P_Real_SMFMAC <0x7a, "v_smfmac_f32_16x16x64fp8bf8">;
defm V_SMFMAC_F32_16X16X64_FP8_FP8 : VOP3P_Real_SMFMAC <0x7b, "v_smfmac_f32_16x16x64fp8fp8">;
defm V_SMFMAC_F32_32X32X32_BF8_BF8 : VOP3P_Real_SMFMAC <0x7c, "v_smfmac_f32_32x32x32bf8bf8">;
defm V_SMFMAC_F32_32X32X32_BF8_FP8 : VOP3P_Real_SMFMAC <0x7d, "v_smfmac_f32_32x32x32bf8fp8">;
defm V_SMFMAC_F32_32X32X32_FP8_BF8 : VOP3P_Real_SMFMAC <0x7e, "v_smfmac_f32_32x32x32fp8bf8">;
defm V_SMFMAC_F32_32X32X32_FP8_FP8 : VOP3P_Real_SMFMAC <0x7f, "v_smfmac_f32_32x32x32fp8fp8">;

let SubtargetPredicate = HasPackedFP32Ops in {
  defm V_PK_FMA_F32 : VOP3P_Real_vi <0x30>;
  defm V_PK_MUL_F32 : VOP3P_Real_vi <0x31>;
  defm V_PK_ADD_F32 : VOP3P_Real_vi <0x32>;
  defm V_PK_MOV_B32 : VOP3P_Real_vi <0x33>;
} // End SubtargetPredicate = HasPackedFP32Ops

//===----------------------------------------------------------------------===//
// GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1 in {
  multiclass VOP3P_Real_gfx10<bits<7> op> {
    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
  }
} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10", VOP3P = 1

// Convenience multiclasses for opcodes shared between GFX10 and GFX11.
multiclass VOP3P_Real_gfx10_gfx11<bits<7> op>
    : VOP3P_Real_gfx10<op>, VOP3P_Real_gfx11<op>;

multiclass VOP3P_Real_gfx10_gfx11_Triple<bits<7> op>
    : VOP3P_Real_gfx10<op>, VOP3P_Realtriple_gfx11<op>;

defm V_PK_MAD_I16 : VOP3P_Real_gfx10_gfx11<0x00>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_gfx10_gfx11<0x01>;
defm V_PK_ADD_I16 : VOP3P_Real_gfx10_gfx11<0x02>;
defm V_PK_SUB_I16 : VOP3P_Real_gfx10_gfx11<0x03>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10_gfx11<0x04>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10_gfx11<0x05>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10_gfx11<0x06>;
defm V_PK_MAX_I16 : VOP3P_Real_gfx10_gfx11<0x07>;
defm V_PK_MIN_I16 : VOP3P_Real_gfx10_gfx11<0x08>;
defm V_PK_MAD_U16 : VOP3P_Real_gfx10_gfx11<0x09>;
defm V_PK_ADD_U16 : VOP3P_Real_gfx10_gfx11<0x0a>;
defm V_PK_SUB_U16 : VOP3P_Real_gfx10_gfx11<0x0b>;
defm V_PK_MAX_U16 : VOP3P_Real_gfx10_gfx11<0x0c>;
defm V_PK_MIN_U16 : VOP3P_Real_gfx10_gfx11<0x0d>;
defm V_PK_FMA_F16 : VOP3P_Real_gfx10_gfx11<0x0e>;
defm V_PK_ADD_F16 : VOP3P_Real_gfx10_gfx11<0x0f>;
defm V_PK_MUL_F16 : VOP3P_Real_gfx10_gfx11<0x10>;
defm V_PK_MIN_F16 : VOP3P_Real_gfx10_gfx11<0x11>;
defm V_PK_MAX_F16 : VOP3P_Real_gfx10_gfx11<0x12>;
defm V_FMA_MIX_F32 : VOP3P_Real_gfx10_gfx11_Triple <0x20>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_gfx10_gfx11_Triple <0x21>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_gfx10_gfx11_Triple <0x22>;

let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x14>;
defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x15>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot7Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_gfx10_gfx11_Triple <0x13>;
defm V_DOT4_U32_U8 : VOP3P_Real_gfx10_gfx11 <0x17>;
defm V_DOT8_U32_U4 : VOP3P_Real_gfx10_gfx11 <0x19>;

} // End SubtargetPredicate = HasDot7Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8 : VOP3P_Real_gfx10 <0x16>;
defm V_DOT8_I32_I4 : VOP3P_Real_gfx10 <0x18>;

} // End SubtargetPredicate = HasDot1Insts