//===-- VOP3PInstructions.td - Vector Instruction Definitions -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP3P Classes
//===----------------------------------------------------------------------===//

// Packed-math (VOP3P) pseudo instruction. The selection pattern is chosen
// from the profile: a source-modifier-aware pattern when the profile has
// modifiers, otherwise a plain pattern. HasExplicitClamp is forwarded to
// getVOP3PModPat so the clamp bit can appear as an explicit pattern operand.
class VOP3PInst<string OpName, VOPProfile P,
                SDPatternOperator node = null_frag,
                bit HasExplicitClamp = 0> :
  VOP3P_Pseudo<OpName, P,
    !if(P.HasModifiers, getVOP3PModPat<P, node, HasExplicitClamp>.ret, getVOP3Pat<P, node>.ret)
>;

// Non-packed instructions that use the VOP3P encoding.
// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
// When UseTiedOutput is set, the destination is tied to an extra $vdst_in
// operand so the untouched half of the 32-bit result register is preserved.
class VOP3_VOP3PInst<string OpName, VOPProfile P, bit UseTiedOutput = 0,
                     SDPatternOperator node = null_frag> :
  VOP3P_Pseudo<OpName, P> {
  // These operands are only sort of f16 operands. Depending on
  // op_sel_hi, these may be interpreted as f32. The inline immediate
  // values are really f16 converted to f32, so we treat these as f16
  // operands.
  let InOperandList =
    !con(
      !con(
        (ins FP16InputMods:$src0_modifiers, VCSrc_f16:$src0,
             FP16InputMods:$src1_modifiers, VCSrc_f16:$src1,
             FP16InputMods:$src2_modifiers, VCSrc_f16:$src2),
         // FIXME: clampmod0 misbehaves with the non-default vdst_in
         // following it. For now workaround this by requiring clamp
         // in tied patterns. This should use undef_tied_input, but it
         // seems underdeveloped and doesn't apply the right register
         // class constraints.
         !if(UseTiedOutput, (ins clampmod:$clamp, VGPR_32:$vdst_in),
                            (ins clampmod0:$clamp))),
         (ins op_sel0:$op_sel, op_sel_hi0:$op_sel_hi));

  let Constraints = !if(UseTiedOutput, "$vdst = $vdst_in", "");
  // $vdst_in is a software-only tie; it is not part of the encoded instruction.
  let DisableEncoding = !if(UseTiedOutput, "$vdst_in", "");
  let AsmOperands =
    " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
}

let isCommutable = 1 in {
def V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
def V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;

let FPDPRounding = 1 in {
def V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, any_fma>;
def V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, any_fadd>;
def V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, any_fmul>;
} // End FPDPRounding = 1
def V_PK_MAX_F16 : VOP3PInst<"v_pk_max_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fmaxnum_like>;
def V_PK_MIN_F16 : VOP3PInst<"v_pk_min_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fminnum_like>;

def V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, add>;
def V_PK_ADD_I16 : VOP3PInst<"v_pk_add_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>>;
def V_PK_MUL_LO_U16 : VOP3PInst<"v_pk_mul_lo_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, mul>;

def V_PK_MIN_I16 : VOP3PInst<"v_pk_min_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, smin>;
def V_PK_MIN_U16 : VOP3PInst<"v_pk_min_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, umin>;
def V_PK_MAX_I16 : VOP3PInst<"v_pk_max_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, smax>;
def V_PK_MAX_U16 : VOP3PInst<"v_pk_max_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, umax>;
}

// sub is not commutative, so these live outside the isCommutable block.
def V_PK_SUB_U16 : VOP3PInst<"v_pk_sub_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>>;
def V_PK_SUB_I16 : VOP3PInst<"v_pk_sub_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, sub>;

def V_PK_LSHLREV_B16 : VOP3PInst<"v_pk_lshlrev_b16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, lshl_rev>;
def V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, ashr_rev>;
def V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, lshr_rev>;


let SubtargetPredicate = HasVOP3PInsts in {

// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// The constant will be emitted as a mov, and folded later.
// TODO: We could directly encode the immediate now
def : GCNPat<
  (add (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)), NegSubInlineConstV216:$src1),
  (V_PK_SUB_U16 $src0_modifiers, $src0, SRCMODS.OP_SEL_1, NegSubInlineConstV216:$src1)
>;

// Integer operations with clamp bit set.
class VOP3PSatPat<SDPatternOperator pat, Instruction inst> : GCNPat<
  (pat (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)),
       (v2i16 (VOP3PMods v2i16:$src1, i32:$src1_modifiers))),
  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE)
>;

def : VOP3PSatPat<uaddsat, V_PK_ADD_U16>;
def : VOP3PSatPat<saddsat, V_PK_ADD_I16>;
def : VOP3PSatPat<usubsat, V_PK_SUB_U16>;
def : VOP3PSatPat<ssubsat, V_PK_SUB_I16>;
} // End SubtargetPredicate = HasVOP3PInsts

// Selection patterns for the mixed-precision mad/fma family: an f32-domain
// fma-like operation on (possibly f16) sources whose f16-rounded result is
// written to the low (mixlo) or high (mixhi) half of a 32-bit register.
// NOTE(review): the mix_inst parameter is not referenced by any pattern body
// below — only mixlo_inst/mixhi_inst are used.
multiclass MadFmaMixPats<SDPatternOperator fma_like,
                         Instruction mix_inst,
                         Instruction mixlo_inst,
                         Instruction mixhi_inst> {
  // f16 result in the low half; high half is left undefined.
  def : GCNPat <
    (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                $src2_modifiers, $src2,
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  // FIXME: Special case handling for maxhi (especially for clamp)
  // because dealing with the write to high half of the register is
  // difficult.
  def : GCNPat <
    (build_vector f16:$elt0, (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.NONE,
                       $elt0))
  >;

  // As above, but with a clamp applied to the rounded high-half result.
  def : GCNPat <
    (build_vector
      f16:$elt0,
      (AMDGPUclamp (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.ENABLE,
                       $elt0))
  >;

  // Clamped v2f16 built from two independent mixes: mixlo produces the low
  // half, then mixhi writes the high half tied to mixlo's result.
  def : GCNPat <
    (AMDGPUclamp (build_vector
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$lo_src0, i32:$lo_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src1, i32:$lo_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src2, i32:$lo_src2_modifiers)))),
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$hi_src0, i32:$hi_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src1, i32:$hi_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src2, i32:$hi_src2_modifiers)))))),
    (v2f16 (mixhi_inst $hi_src0_modifiers, $hi_src0,
                       $hi_src1_modifiers, $hi_src1,
                       $hi_src2_modifiers, $hi_src2,
                       DSTCLAMP.ENABLE,
                       (mixlo_inst $lo_src0_modifiers, $lo_src0,
                                   $lo_src1_modifiers, $lo_src1,
                                   $lo_src2_modifiers, $lo_src2,
                                   DSTCLAMP.ENABLE,
                                   (i32 (IMPLICIT_DEF)))))
  >;
}

let SubtargetPredicate = HasMadMixInsts in {

// These are VOP3a-like opcodes which accept no omod.
// Size of src arguments (16/32) is controlled by op_sel.
// For 16-bit src arguments their location (hi/lo) are controlled by op_sel_hi.
let isCommutable = 1, mayRaiseFPException = 0 in {
def V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
def V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;

let ClampLo = 0, ClampHi = 1 in {
def V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;
}
} // End FPDPRounding = 1
}

defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
} // End SubtargetPredicate = HasMadMixInsts


// Essentially the same as the mad_mix versions
let SubtargetPredicate = HasFmaMixInsts in {
let isCommutable = 1 in {
def V_FMA_MIX_F32 : VOP3_VOP3PInst<"v_fma_mix_f32", VOP3_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
def V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;

let ClampLo = 0, ClampHi = 1 in {
def V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_fma_mixhi_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;
}
} // End FPDPRounding = 1
}

defm : MadFmaMixPats<fma, V_FMA_MIX_F32, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
}

// Defines patterns that extract signed 4bit from each Idx[0].
// Each pair is [bit offset, left-shift amount]; sign extension is done by
// shifting the 4-bit field to the top and arithmetic-shifting back down.
// Offset 28 needs no shl and is handled directly by Extract below via sra.
foreach Idx = [[0,28],[4,24],[8,20],[12,16],[16,12],[20,8],[24,4]] in
  def ExtractSigned4bit_#Idx[0] : PatFrag<(ops node:$src),
                                          (sra (shl node:$src, (i32 Idx[1])), (i32 28))>;

// Defines code pattern that extracts U(unsigned/signed) 4/8bit from FromBitIndex.
// Extracts the bit field of width given by BitMask (255 = 8 bits, 15 = 4 bits)
// starting at FromBitIndex from a 32-bit scalar. U selects unsigned
// (zero-extending) vs. signed (sign-extending) extraction. The topmost field
// (offset 24 for 8-bit, offset 28 for 4-bit) needs only a shift, and the
// field at offset 0 needs no shift at all.
class Extract<int FromBitIndex, int BitMask, bit U>: PatFrag<
  (ops node:$src),
  !if (!or (!and (!eq (BitMask, 255), !eq (FromBitIndex, 24)), !eq (FromBitIndex, 28)), // last element
       !if (U, (srl node:$src, (i32 FromBitIndex)), (sra node:$src, (i32 FromBitIndex))),
       !if (!eq (FromBitIndex, 0), // first element
            !if (U, (and node:$src, (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                      (sext_inreg node:$src, i8))),
            !if (U, (and (srl node:$src, (i32 FromBitIndex)), (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                      (sext_inreg (srl node:$src, (i32 FromBitIndex)), i8)))))>;


foreach Type = ["I", "U"] in
  foreach Index = 0-3 in {
    // Defines patterns that extract each Index'ed 8bit from an unsigned
    // 32bit scalar value;
    def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !eq (Type, "U")>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 8bit of a 32bit scalar value.

    def Mul#Type#_Elt#Index : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), AMDGPUmul_i24_oneuse, AMDGPUmul_u24_oneuse))
        (!cast<Extract>(Type#Index#"_8bit") node:$src0),
        (!cast<Extract>(Type#Index#"_8bit") node:$src1))>;
  }

// Different variants of dot8 patterns cause a huge increase in the compile time.
// Define non-associative/commutative add/mul to prevent permutation in the dot8
// pattern.
// Non-associative, non-commutative mirrors of add / mul_u24 / mul_i24: the
// SDNodes are declared without SDNPCommutative/SDNPAssociative so the pattern
// matcher does not enumerate operand permutations of the dot8 patterns.
def NonACAdd        : SDNode<"ISD::ADD"          , SDTIntBinOp>;
def NonACAdd_oneuse : HasOneUseBinOp<NonACAdd>;

def NonACAMDGPUmul_u24        : SDNode<"AMDGPUISD::MUL_U24"   , SDTIntBinOp>;
def NonACAMDGPUmul_u24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_u24>;

def NonACAMDGPUmul_i24        : SDNode<"AMDGPUISD::MUL_I24"   , SDTIntBinOp>;
def NonACAMDGPUmul_i24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_i24>;

foreach Type = ["I", "U"] in
  foreach Index = 0-7 in {
    // Defines patterns that extract each Index'ed 4bit from an unsigned
    // 32bit scalar value;
    def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !eq (Type, "U")>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 4bit of a 32bit scalar value.
    def Mul#Type#Index#"_4bit" : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), NonACAMDGPUmul_i24_oneuse, NonACAMDGPUmul_u24_oneuse))
        (!cast<Extract>(Type#Index#"_4bit") node:$src0),
        (!cast<Extract>(Type#Index#"_4bit") node:$src1))>;
  }

// Matches the expanded form of an unsigned 2-element dot product with
// accumulator: (src0.hi * src1.hi + src2) + src0.lo * src1.lo.
// NOTE(review): the (i32 8) modifier immediates appear to be the OP_SEL_1
// source-modifier value (cf. SRCMODS.OP_SEL_1 used earlier in this file) —
// confirm against SIDefines.h.
class UDot2Pat<Instruction Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
                                         (srl i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
                             (and i32:$src1, (i32 65535)))
  ),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let SubtargetPredicate = !cast<VOP_Pseudo>(Inst).SubtargetPredicate;
}

// Signed counterpart of UDot2Pat: halves are sign-extended (sra / sext_inreg)
// instead of zero-extended.
class SDot2Pat<Instruction Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
                                         (sra i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
                             (sext_inreg i32:$src1, i16))),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  let SubtargetPredicate = !cast<VOP_Pseudo>(Inst).SubtargetPredicate;
}

let IsDOT = 1 in {
let SubtargetPredicate = HasDot2Insts in {

def V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
  VOP3_Profile<VOP_F32_V2F16_V2F16_F32>,
  AMDGPUfdot2, 1/*ExplicitClamp*/>;
def V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
  VOP3_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_sdot2, 1>;
def V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16",
  VOP3_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_udot2, 1>;
def V_DOT4_U32_U8  : VOP3PInst<"v_dot4_u32_u8",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
def V_DOT8_U32_U4  : VOP3PInst<"v_dot8_u32_u4",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

def V_DOT4_I32_I8  : VOP3PInst<"v_dot4_i32_i8",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
def V_DOT8_I32_I4  : VOP3PInst<"v_dot8_i32_i4",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;

} // End SubtargetPredicate = HasDot1Insts
} // End let IsDOT = 1

def : UDot2Pat<V_DOT2_U32_U16>;
def : SDot2Pat<V_DOT2_I32_I16>;

// dot4: fold the four per-byte multiplies into a left-leaning add chain
// rooted at the $src2 accumulator.
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((i32 i32:$src2), [0, 1, 2, 3], lhs, y,
                      (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
    (!cast<VOP3PInst>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// dot8: same construction with eight 4-bit lanes, using the non-commutative
// NonACAdd_oneuse to keep the pattern count down.
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [1, 2, 3, 4, 5, 6, 7], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3PInst>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
// Different variants of dot8 code-gen dag patterns are not generated through table-gen due to a huge increase
// in the compile time. Directly handle the pattern generated by the FE here.
// (Same as the dot8 pattern above, but with lane 7 folded in first.)
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [7, 1, 2, 3, 4, 5, 6], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3PInst>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// Accumulator-register (AGPR) destination operands for the MAI instructions.
def ADst_32   : VOPDstOperand<AGPR_32>;
def ADst_128  : VOPDstOperand<AReg_128>;
def ADst_512  : VOPDstOperand<AReg_512>;
def ADst_1024 : VOPDstOperand<AReg_1024>;

def VOPProfileAccRead : VOP3_Profile<VOP_I32_I32, VOP3_MAI> {
  let Src0RC64 = ARegSrc_32;
}

def VOPProfileAccWrite : VOP3_Profile<VOP_I32_I32, VOP3_MAI> {
  let DstRC = ADst_32;
  let Src0RC64 = VISrc_b32;
}

// Profile for MFMA instructions: A/B sources share SrcABRC, the C source and
// destination are the wide accumulator classes, and the operand list carries
// the MAI-specific cbsz/abid/blgp modifiers instead of op_sel/clamp.
class VOPProfileMAI<VOPProfile P, RegisterOperand _SrcRC, RegisterOperand _DstRC,
                    RegisterOperand SrcABRC = AVSrc_32>
  : VOP3_Profile<P, VOP3_MAI> {
  let DstRC = _DstRC;
  let Src0RC64 = SrcABRC;
  let Src1RC64 = SrcABRC;
  let Src2RC64 = _SrcRC;
  let HasOpSel = 0;
  let HasClamp = 0;
  let Asm64 = " $vdst, $src0, $src1, $src2$cbsz$abid$blgp";
  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, cbsz:$cbsz, abid:$abid, blgp:$blgp);
}

def VOPProfileMAI_F32_F32_X4    : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32,       AISrc_128_f32,  ADst_128>;
def VOPProfileMAI_F32_F32_X16   : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32,     AISrc_512_f32,  ADst_512>;
def VOPProfileMAI_F32_F32_X32   : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32,     AISrc_1024_f32, ADst_1024>;
def VOPProfileMAI_I32_I32_X4    : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32,       AISrc_128_b32,  ADst_128>;
def VOPProfileMAI_I32_I32_X16   : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32,     AISrc_512_b32,  ADst_512>;
def VOPProfileMAI_I32_I32_X32   : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32,     AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V2I16_X4  : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32,   AISrc_128_b32,  ADst_128>;
def VOPProfileMAI_F32_V2I16_X16 : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, AISrc_512_b32,  ADst_512>;
def VOPProfileMAI_F32_V2I16_X32 : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V4F16_X4  : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32,   AISrc_128_b32,  ADst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X16 : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, AISrc_512_b32,  ADst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X32 : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;

let Predicates = [HasMAIInsts] in {

let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
  defm V_ACCVGPR_READ_B32  : VOP3Inst<"v_accvgpr_read_b32",  VOPProfileAccRead>;
  let isMoveImm = 1 in {
    defm V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite>;
  } // End isMoveImm = 1
} // End isAsCheapAsAMove = 1, isReMaterializable = 1

// FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
defm V_MFMA_F32_4X4X1F32    : VOP3Inst<"v_mfma_f32_4x4x1f32",    VOPProfileMAI_F32_F32_X4,    int_amdgcn_mfma_f32_4x4x1f32>;
defm V_MFMA_F32_4X4X4F16    : VOP3Inst<"v_mfma_f32_4x4x4f16",    VOPProfileMAI_F32_V4F16_X4,  int_amdgcn_mfma_f32_4x4x4f16>;
defm V_MFMA_I32_4X4X4I8     : VOP3Inst<"v_mfma_i32_4x4x4i8",     VOPProfileMAI_I32_I32_X4,    int_amdgcn_mfma_i32_4x4x4i8>;
defm V_MFMA_F32_4X4X2BF16   : VOP3Inst<"v_mfma_f32_4x4x2bf16",   VOPProfileMAI_F32_V2I16_X4,  int_amdgcn_mfma_f32_4x4x2bf16>;
defm V_MFMA_F32_16X16X1F32  : VOP3Inst<"v_mfma_f32_16x16x1f32",  VOPProfileMAI_F32_F32_X16,   int_amdgcn_mfma_f32_16x16x1f32>;
defm V_MFMA_F32_16X16X4F32  : VOP3Inst<"v_mfma_f32_16x16x4f32",  VOPProfileMAI_F32_F32_X4,    int_amdgcn_mfma_f32_16x16x4f32>;
defm V_MFMA_F32_16X16X4F16  : VOP3Inst<"v_mfma_f32_16x16x4f16",  VOPProfileMAI_F32_V4F16_X16, int_amdgcn_mfma_f32_16x16x4f16>;
defm V_MFMA_F32_16X16X16F16 : VOP3Inst<"v_mfma_f32_16x16x16f16", VOPProfileMAI_F32_V4F16_X4,  int_amdgcn_mfma_f32_16x16x16f16>;
defm V_MFMA_I32_16X16X4I8   : VOP3Inst<"v_mfma_i32_16x16x4i8",   VOPProfileMAI_I32_I32_X16,   int_amdgcn_mfma_i32_16x16x4i8>;
defm V_MFMA_I32_16X16X16I8  : VOP3Inst<"v_mfma_i32_16x16x16i8",  VOPProfileMAI_I32_I32_X4,    int_amdgcn_mfma_i32_16x16x16i8>;
defm V_MFMA_F32_16X16X2BF16 : VOP3Inst<"v_mfma_f32_16x16x2bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_16x16x2bf16>;
defm V_MFMA_F32_16X16X8BF16 : VOP3Inst<"v_mfma_f32_16x16x8bf16", VOPProfileMAI_F32_V2I16_X4,  int_amdgcn_mfma_f32_16x16x8bf16>;
defm V_MFMA_F32_32X32X1F32  : VOP3Inst<"v_mfma_f32_32x32x1f32",  VOPProfileMAI_F32_F32_X32,   int_amdgcn_mfma_f32_32x32x1f32>;
defm V_MFMA_F32_32X32X2F32  : VOP3Inst<"v_mfma_f32_32x32x2f32",  VOPProfileMAI_F32_F32_X16,   int_amdgcn_mfma_f32_32x32x2f32>;
defm V_MFMA_F32_32X32X4F16  : VOP3Inst<"v_mfma_f32_32x32x4f16",  VOPProfileMAI_F32_V4F16_X32, int_amdgcn_mfma_f32_32x32x4f16>;
defm V_MFMA_F32_32X32X8F16  : VOP3Inst<"v_mfma_f32_32x32x8f16",  VOPProfileMAI_F32_V4F16_X16, int_amdgcn_mfma_f32_32x32x8f16>;
defm V_MFMA_I32_32X32X4I8   : VOP3Inst<"v_mfma_i32_32x32x4i8",   VOPProfileMAI_I32_I32_X32,   int_amdgcn_mfma_i32_32x32x4i8>;
defm V_MFMA_I32_32X32X8I8   : VOP3Inst<"v_mfma_i32_32x32x8i8",   VOPProfileMAI_I32_I32_X16,   int_amdgcn_mfma_i32_32x32x8i8>;
defm V_MFMA_F32_32X32X2BF16 : VOP3Inst<"v_mfma_f32_32x32x2bf16", VOPProfileMAI_F32_V2I16_X32, int_amdgcn_mfma_f32_32x32x2bf16>;
defm V_MFMA_F32_32X32X4BF16 : VOP3Inst<"v_mfma_f32_32x32x4bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_32x32x4bf16>;
} // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1

} // End SubtargetPredicate = HasMAIInsts

def : MnemonicAlias<"v_accvgpr_read", "v_accvgpr_read_b32">;
def : MnemonicAlias<"v_accvgpr_write", "v_accvgpr_write_b32">;

//===----------------------------------------------------------------------===//
// Begin Real Encodings
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// GFX8 (VI)
//===----------------------------------------------------------------------===//

multiclass VOP3P_Real_vi<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicate = HasVOP3PInsts;
    let DecoderNamespace = "GFX8";
  }
}

multiclass VOP3P_Real_MAI<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
    let Inst{14} = 1; // op_sel_hi(2) default value
    let Inst{59} = 1; // op_sel_hi(0) default value
    let Inst{60} = 1; // op_sel_hi(1) default value
  }
}

// Like VOP3P_Real_MAI, but without the op_sel_hi default-bit overrides.
multiclass VOP3P_Real_MFMA<bits<7> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
  }
}

defm V_PK_MAD_I16 : VOP3P_Real_vi <0x00>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x01>;
defm V_PK_ADD_I16 : VOP3P_Real_vi <0x02>;
defm V_PK_SUB_I16 : VOP3P_Real_vi <0x03>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x04>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x05>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x06>;
defm V_PK_MAX_I16 : VOP3P_Real_vi <0x07>;
defm V_PK_MIN_I16 : VOP3P_Real_vi <0x08>;
defm V_PK_MAD_U16 : VOP3P_Real_vi <0x09>;

defm V_PK_ADD_U16 : VOP3P_Real_vi <0x0a>;
defm V_PK_SUB_U16 : VOP3P_Real_vi <0x0b>;
defm V_PK_MAX_U16 : VOP3P_Real_vi <0x0c>;
defm V_PK_MIN_U16 : VOP3P_Real_vi <0x0d>;
defm V_PK_FMA_F16 : VOP3P_Real_vi <0x0e>;
defm V_PK_ADD_F16 : VOP3P_Real_vi <0x0f>;
defm V_PK_MUL_F16 : VOP3P_Real_vi <0x10>;
defm V_PK_MIN_F16 : VOP3P_Real_vi <0x11>;
defm V_PK_MAX_F16 : VOP3P_Real_vi <0x12>;


let SubtargetPredicate = HasMadMixInsts in {
defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x20>;
defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x21>;
defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x22>;
}

let SubtargetPredicate = HasFmaMixInsts in {
let DecoderNamespace = "GFX9_DL" in {
// The mad_mix instructions were renamed and their behaviors changed,
// but the opcode stayed the same so we need to put these in a
// different DecoderNamespace to avoid the ambiguity.
defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x20>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x21>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x22>;
}
}


let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x23>;
defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x26>;
defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x27>;
defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x29>;
defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x2b>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x28>;
defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x2a>;

} // End SubtargetPredicate = HasDot1Insts

let SubtargetPredicate = HasMAIInsts in {

defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x58>;
defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x59>;
defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MFMA <0x40>;
defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MFMA <0x41>;
defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MFMA <0x42>;
defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MFMA <0x44>;
defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MFMA <0x45>;
defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MFMA <0x48>;
defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MFMA <0x49>;
defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MFMA <0x4a>;
defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MFMA <0x4c>;
defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MFMA <0x4d>;
defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MFMA <0x50>;
defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MFMA <0x51>;
defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MFMA <0x52>;
defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MFMA <0x55>;
defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MFMA <0x54>;
defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MFMA <0x68>;
defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MFMA <0x69>;
defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MFMA <0x6b>;
defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MFMA <0x6c>;
defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MFMA <0x6d>;

} // End SubtargetPredicate = HasMAIInsts

//===----------------------------------------------------------------------===//
// GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
  multiclass VOP3P_Real_gfx10<bits<7> op> {
    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
  }
} // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"

defm V_PK_MAD_I16     : VOP3P_Real_gfx10<0x00>;
defm V_PK_MUL_LO_U16  : VOP3P_Real_gfx10<0x01>;
defm V_PK_ADD_I16     : VOP3P_Real_gfx10<0x02>;
defm V_PK_SUB_I16     : VOP3P_Real_gfx10<0x03>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10<0x04>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10<0x05>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10<0x06>;
defm V_PK_MAX_I16     : VOP3P_Real_gfx10<0x07>;
defm V_PK_MIN_I16     : VOP3P_Real_gfx10<0x08>;
defm V_PK_MAD_U16     : VOP3P_Real_gfx10<0x09>;
defm V_PK_ADD_U16     : VOP3P_Real_gfx10<0x0a>;
defm V_PK_SUB_U16     : VOP3P_Real_gfx10<0x0b>;
defm V_PK_MAX_U16     : VOP3P_Real_gfx10<0x0c>;
defm V_PK_MIN_U16     : VOP3P_Real_gfx10<0x0d>;
defm V_PK_FMA_F16     : VOP3P_Real_gfx10<0x0e>;
defm V_PK_ADD_F16     : VOP3P_Real_gfx10<0x0f>;
defm V_PK_MUL_F16     : VOP3P_Real_gfx10<0x10>;
defm V_PK_MIN_F16     : VOP3P_Real_gfx10<0x11>;
defm V_PK_MAX_F16     : VOP3P_Real_gfx10<0x12>;
defm V_FMA_MIX_F32    : VOP3P_Real_gfx10<0x20>;
defm V_FMA_MIXLO_F16  : VOP3P_Real_gfx10<0x21>;
defm V_FMA_MIXHI_F16  : VOP3P_Real_gfx10<0x22>;

// Note: the dot instructions use different opcodes on GFX10 than on GFX8/9.
let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_gfx10 <0x13>;
defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x14>;
defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x15>;
defm V_DOT4_U32_U8  : VOP3P_Real_gfx10 <0x17>;
defm V_DOT8_U32_U4  : VOP3P_Real_gfx10 <0x19>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8  : VOP3P_Real_gfx10 <0x16>;
defm V_DOT8_I32_I4  : VOP3P_Real_gfx10 <0x18>;

} // End SubtargetPredicate = HasDot1Insts