1//===-- VOP3Instructions.td - Vector Instruction Definitions --------------===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8 9//===----------------------------------------------------------------------===// 10// VOP3 Classes 11//===----------------------------------------------------------------------===// 12 13class getVOP3ModPat<VOPProfile P, SDPatternOperator node> { 14 dag src0 = !if(P.HasOMod, 15 (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod), 16 (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)); 17 18 list<dag> ret3 = [(set P.DstVT:$vdst, 19 (DivergentFragOrOp<node, P>.ret (P.Src0VT src0), 20 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)), 21 (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))]; 22 23 list<dag> ret2 = [(set P.DstVT:$vdst, 24 (DivergentFragOrOp<node, P>.ret (P.Src0VT src0), 25 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))]; 26 27 list<dag> ret1 = [(set P.DstVT:$vdst, 28 (DivergentFragOrOp<node, P>.ret (P.Src0VT src0)))]; 29 30 list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3, 31 !if(!eq(P.NumSrcArgs, 2), ret2, 32 ret1)); 33} 34 35class getVOP3PModPat<VOPProfile P, SDPatternOperator node, bit HasExplicitClamp> { 36 dag src0_dag = (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)); 37 dag src1_dag = (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)); 38 dag src2_dag = (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers)); 39 dag clamp_dag = (i1 timm:$clamp); 40 41 list<dag> ret3 = [(set P.DstVT:$vdst, 42 !if(HasExplicitClamp, 43 (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, src2_dag, clamp_dag), 44 (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, src2_dag)))]; 45 46 list<dag> ret2 = [(set P.DstVT:$vdst, 47 !if(HasExplicitClamp, 48 (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, clamp_dag), 49 (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag)))]; 50 51 list<dag> ret1 = [(set P.DstVT:$vdst, 52 !if(HasExplicitClamp, 53 (DivergentFragOrOp<node, P>.ret src0_dag, clamp_dag), 54 (DivergentFragOrOp<node, P>.ret src0_dag)))]; 55 56 list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3, 57 !if(!eq(P.NumSrcArgs, 2), ret2, 58 ret1)); 59} 60 61class getVOP3OpSelPat<VOPProfile P, SDPatternOperator node> { 62 list<dag> ret3 = [(set P.DstVT:$vdst, 63 (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers)), 64 (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers)), 65 (P.Src2VT (VOP3OpSel P.Src2VT:$src2, i32:$src2_modifiers))))]; 66 67 list<dag> ret2 = [(set P.DstVT:$vdst, 68 (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers)), 69 (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers))))]; 70 71 list<dag> ret1 = [(set P.DstVT:$vdst, 72 (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers))))]; 73 74 list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3, 75 !if(!eq(P.NumSrcArgs, 2), ret2, 76 ret1)); 77} 78 79class getVOP3OpSelModPat<VOPProfile P, SDPatternOperator node> { 80 list<dag> ret3 = [(set P.DstVT:$vdst, 81 (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers), 82 (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))), 83 (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers)), 84 (P.Src2VT (VOP3OpSelMods P.Src2VT:$src2, i32:$src2_modifiers))))]; 85 86 list<dag> ret2 = [(set P.DstVT:$vdst, 87 (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers)), 88 (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))), 89 (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers))))]; 90 91 list<dag> ret1 = [(set P.DstVT:$vdst, 92 (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))))]; 93 94 list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3, 95 !if(!eq(P.NumSrcArgs, 2), ret2, 96 ret1)); 97} 98 99class getVOP3Pat<VOPProfile P, SDPatternOperator node> { 100 list<dag> ret3 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2))]; 101 list<dag> ret2 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1))]; 102 list<dag> ret1 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0))]; 103 list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3, 104 !if(!eq(P.NumSrcArgs, 2), ret2, 105 ret1)); 106} 107 108class getVOP3ClampPat<VOPProfile P, SDPatternOperator node> { 109 list<dag> ret3 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, i1:$clamp))]; 110 list<dag> ret2 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, i1:$clamp))]; 111 list<dag> ret1 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, i1:$clamp))]; 112 list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3, 113 !if(!eq(P.NumSrcArgs, 2), ret2, 114 ret1)); 115} 116 117class getVOP3MAIPat<VOPProfile P, SDPatternOperator node> { 118 list<dag> ret = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, 119 timm:$cbsz, timm:$abid, timm:$blgp))]; 120} 121 122class VOP3Inst<string OpName, VOPProfile P, SDPatternOperator node = null_frag, bit VOP3Only = 0> : 123 VOP3_Pseudo<OpName, P, 124 !if(P.HasOpSel, 125 !if(P.HasModifiers, 126 getVOP3OpSelModPat<P, node>.ret, 127 getVOP3OpSelPat<P, node>.ret), 128 !if(P.HasModifiers, 129 getVOP3ModPat<P, node>.ret, 130 !if(P.HasIntClamp, 131 getVOP3ClampPat<P, node>.ret, 132 !if (P.IsMAI, 133 getVOP3MAIPat<P, node>.ret, 134 getVOP3Pat<P, node>.ret)))), 135 VOP3Only, 0, P.HasOpSel> { 136 137 let IntClamp = P.HasIntClamp; 138 let AsmMatchConverter = 139 !if(P.HasOpSel, 140 "cvtVOP3OpSel", 141 !if(!or(P.HasModifiers, !or(P.HasOMod, P.HasIntClamp)), 142 "cvtVOP3", 143 "")); 144} 145 146// Special case for v_div_fmas_{f32|f64}, since it seems to be the 147// only VOP instruction that implicitly reads VCC. 148let Asm64 = " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod" in { 149def VOP_F32_F32_F32_F32_VCC : VOPProfile<[f32, f32, f32, f32]> { 150 let Outs64 = (outs DstRC.RegClass:$vdst); 151} 152def VOP_F64_F64_F64_F64_VCC : VOPProfile<[f64, f64, f64, f64]> { 153 let Outs64 = (outs DstRC.RegClass:$vdst); 154} 155} 156 157class VOP3Features<bit Clamp, bit OpSel, bit Packed, bit MAI> { 158 bit HasClamp = Clamp; 159 bit HasOpSel = OpSel; 160 bit IsPacked = Packed; 161 bit IsMAI = MAI; 162} 163 164def VOP3_REGULAR : VOP3Features<0, 0, 0, 0>; 165def VOP3_CLAMP : VOP3Features<1, 0, 0, 0>; 166def VOP3_OPSEL : VOP3Features<1, 1, 0, 0>; 167def VOP3_PACKED : VOP3Features<1, 1, 1, 0>; 168def VOP3_MAI : VOP3Features<0, 0, 0, 1>; 169 170class VOP3_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR> : VOPProfile<P.ArgVT> { 171 172 let HasClamp = !if(Features.HasClamp, 1, P.HasClamp); 173 let HasOpSel = !if(Features.HasOpSel, 1, P.HasOpSel); 174 let IsMAI = !if(Features.IsMAI, 1, P.IsMAI); 175 let IsPacked = !if(Features.IsPacked, 1, P.IsPacked); 176 177 let HasModifiers = !if(Features.IsPacked, !if(Features.IsMAI, 0, 1), P.HasModifiers); 178 179 // FIXME: Hack to stop printing _e64 180 let Outs64 = (outs DstRC.RegClass:$vdst); 181 let Asm64 = 182 " " # !if(Features.HasOpSel, 183 getAsmVOP3OpSel<NumSrcArgs, 184 HasIntClamp, 185 HasSrc0FloatMods, 186 HasSrc1FloatMods, 187 HasSrc2FloatMods>.ret, 188 !if(Features.HasClamp, 189 getAsm64<HasDst, NumSrcArgs, HasIntClamp, 190 HasModifiers, HasOMod, DstVT>.ret, 191 P.Asm64)); 192 let NeedPatGen = P.NeedPatGen; 193} 194 195class VOP3b_Profile<ValueType vt> : VOPProfile<[vt, vt, vt, vt]> { 196 // v_div_scale_{f32|f64} do not support input modifiers. 197 let HasModifiers = 0; 198 let HasClamp = 0; 199 let HasOMod = 0; 200 let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); 201 let Asm64 = " $vdst, $sdst, $src0, $src1, $src2"; 202} 203 204def VOP3b_F32_I1_F32_F32_F32 : VOP3b_Profile<f32> { 205 // FIXME: Hack to stop printing _e64 206 let DstRC = RegisterOperand<VGPR_32>; 207} 208 209def VOP3b_F64_I1_F64_F64_F64 : VOP3b_Profile<f64> { 210 // FIXME: Hack to stop printing _e64 211 let DstRC = RegisterOperand<VReg_64>; 212} 213 214def VOP3b_I64_I1_I32_I32_I64 : VOPProfile<[i64, i32, i32, i64]> { 215 let HasClamp = 1; 216 217 // FIXME: Hack to stop printing _e64 218 let DstRC = RegisterOperand<VReg_64>; 219 220 let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); 221 let Asm64 = " $vdst, $sdst, $src0, $src1, $src2$clamp"; 222} 223 224//===----------------------------------------------------------------------===// 225// VOP3 INTERP 226//===----------------------------------------------------------------------===// 227 228class VOP3Interp<string OpName, VOPProfile P, list<dag> pattern = []> : 229 VOP3_Pseudo<OpName, P, pattern> { 230 let AsmMatchConverter = "cvtVOP3Interp"; 231 let mayRaiseFPException = 0; 232} 233 234def VOP3_INTERP : VOPProfile<[f32, f32, i32, untyped]> { 235 let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, 236 Attr:$attr, AttrChan:$attrchan, 237 clampmod0:$clamp, omod0:$omod); 238 239 let Asm64 = "$vdst, $src0_modifiers, $attr$attrchan$clamp$omod"; 240} 241 242def VOP3_INTERP_MOV : VOPProfile<[f32, i32, i32, untyped]> { 243 let Ins64 = (ins InterpSlot:$src0, 244 Attr:$attr, AttrChan:$attrchan, 245 clampmod0:$clamp, omod0:$omod); 246 247 let Asm64 = "$vdst, $src0, $attr$attrchan$clamp$omod"; 248 249 let HasClamp = 1; 250} 251 252class getInterp16Asm <bit HasSrc2, bit HasOMod> { 253 string src2 = !if(HasSrc2, ", $src2_modifiers", ""); 254 string omod = !if(HasOMod, "$omod", ""); 255 string ret = 256 " $vdst, $src0_modifiers, $attr$attrchan"#src2#"$high$clamp"#omod; 257} 258 259class getInterp16Ins <bit HasSrc2, bit HasOMod, 260 Operand Src0Mod, Operand Src2Mod> { 261 dag ret = !if(HasSrc2, 262 !if(HasOMod, 263 (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, 264 Attr:$attr, AttrChan:$attrchan, 265 Src2Mod:$src2_modifiers, VRegSrc_32:$src2, 266 highmod:$high, clampmod0:$clamp, omod0:$omod), 267 (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, 268 Attr:$attr, AttrChan:$attrchan, 269 Src2Mod:$src2_modifiers, VRegSrc_32:$src2, 270 highmod:$high, clampmod0:$clamp) 271 ), 272 (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, 273 Attr:$attr, AttrChan:$attrchan, 274 highmod:$high, clampmod0:$clamp, omod0:$omod) 275 ); 276} 277 278class VOP3_INTERP16 <list<ValueType> ArgVT> : VOPProfile<ArgVT> { 279 280 let HasOMod = !if(!eq(DstVT.Value, f16.Value), 0, 1); 281 let HasHigh = 1; 282 283 let Outs64 = (outs VGPR_32:$vdst); 284 let Ins64 = getInterp16Ins<HasSrc2, HasOMod, Src0Mod, Src2Mod>.ret; 285 let Asm64 = getInterp16Asm<HasSrc2, HasOMod>.ret; 286} 287 288//===----------------------------------------------------------------------===// 289// VOP3 Instructions 290//===----------------------------------------------------------------------===// 291 292let isCommutable = 1 in { 293 294let mayRaiseFPException = 0 in { 295let SubtargetPredicate = HasMadMacF32Insts in { 296def V_MAD_LEGACY_F32 : VOP3Inst <"v_mad_legacy_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>; 297def V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>; 298} // End SubtargetPredicate = HasMadMacInsts 299 300let SubtargetPredicate = HasNoMadMacF32Insts in 301def V_FMA_LEGACY_F32 : VOP3Inst <"v_fma_legacy_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>; 302} 303 304def V_MAD_I32_I24 : VOP3Inst <"v_mad_i32_i24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>; 305def V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>; 306def V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, any_fma>; 307def V_LERP_U8 : VOP3Inst <"v_lerp_u8", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_lerp>; 308 309let SchedRW = [WriteDoubleAdd] in { 310let FPDPRounding = 1 in { 311def V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, any_fma>; 312def V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile<VOP_F64_F64_F64>, any_fadd, 1>; 313def V_MUL_F64 : VOP3Inst <"v_mul_f64", VOP3_Profile<VOP_F64_F64_F64>, fmul, 1>; 314} // End FPDPRounding = 1 315def V_MIN_F64 : VOP3Inst <"v_min_f64", VOP3_Profile<VOP_F64_F64_F64>, fminnum_like, 1>; 316def V_MAX_F64 : VOP3Inst <"v_max_f64", VOP3_Profile<VOP_F64_F64_F64>, fmaxnum_like, 1>; 317} // End SchedRW = [WriteDoubleAdd] 318 319let SchedRW = [WriteQuarterRate32] in { 320def V_MUL_LO_U32 : VOP3Inst <"v_mul_lo_u32", VOP3_Profile<VOP_I32_I32_I32>, mul>; 321def V_MUL_HI_U32 : VOP3Inst <"v_mul_hi_u32", VOP3_Profile<VOP_I32_I32_I32>, mulhu>; 322def V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", VOP3_Profile<VOP_I32_I32_I32>>; 323def V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", VOP3_Profile<VOP_I32_I32_I32>, mulhs>; 324} // End SchedRW = [WriteQuarterRate32] 325 326let Uses = [MODE, VCC, EXEC] in { 327// v_div_fmas_f32: 328// result = src0 * src1 + src2 329// if (vcc) 330// result *= 2^32 331// 332def V_DIV_FMAS_F32 : VOP3_Pseudo <"v_div_fmas_f32", VOP_F32_F32_F32_F32_VCC, []> { 333 let SchedRW = [WriteFloatFMA]; 334} 335// v_div_fmas_f64: 336// result = src0 * src1 + src2 337// if (vcc) 338// result *= 2^64 339// 340def V_DIV_FMAS_F64 : VOP3_Pseudo <"v_div_fmas_f64", VOP_F64_F64_F64_F64_VCC, []> { 341 let SchedRW = [WriteDouble]; 342 let FPDPRounding = 1; 343} 344} // End Uses = [VCC, EXEC] 345 346} // End isCommutable = 1 347 348let mayRaiseFPException = 0 in { 349def V_CUBEID_F32 : VOP3Inst <"v_cubeid_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubeid>; 350def V_CUBESC_F32 : VOP3Inst <"v_cubesc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubesc>; 351def V_CUBETC_F32 : VOP3Inst <"v_cubetc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubetc>; 352def V_CUBEMA_F32 : VOP3Inst <"v_cubema_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubema>; 353} // End mayRaiseFPException 354 355def V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_u32>; 356def V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_i32>; 357def V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfi>; 358def V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, fshr>; 359def V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_alignbyte>; 360 361let mayRaiseFPException = 0 in { // XXX - Seems suspect but manual doesn't say it does 362def V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmin3>; 363def V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmin3>; 364def V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumin3>; 365def V_MAX3_F32 : VOP3Inst <"v_max3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmax3>; 366def V_MAX3_I32 : VOP3Inst <"v_max3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmax3>; 367def V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumax3>; 368def V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmed3>; 369def V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmed3>; 370def V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumed3>; 371} // End mayRaiseFPException = 0 372 373def V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>; 374def V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>; 375def V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>; 376def V_SAD_U32 : VOP3Inst <"v_sad_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>; 377def V_CVT_PK_U8_F32 : VOP3Inst<"v_cvt_pk_u8_f32", VOP3_Profile<VOP_I32_F32_I32_I32>, int_amdgcn_cvt_pk_u8_f32>; 378def V_DIV_FIXUP_F32 : VOP3Inst <"v_div_fixup_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUdiv_fixup>; 379 380let SchedRW = [WriteDoubleAdd], FPDPRounding = 1 in { 381def V_DIV_FIXUP_F64 : VOP3Inst <"v_div_fixup_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, AMDGPUdiv_fixup>; 382def V_LDEXP_F64 : VOP3Inst <"v_ldexp_f64", VOP3_Profile<VOP_F64_F64_I32>, AMDGPUldexp, 1>; 383} // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1 384 385 386let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it does. 387def V_DIV_SCALE_F32 : VOP3_Pseudo <"v_div_scale_f32", VOP3b_F32_I1_F32_F32_F32, [], 1> { 388 let SchedRW = [WriteFloatFMA, WriteSALU]; 389 let AsmMatchConverter = ""; 390} 391 392// Double precision division pre-scale. 393def V_DIV_SCALE_F64 : VOP3_Pseudo <"v_div_scale_f64", VOP3b_F64_I1_F64_F64_F64, [], 1> { 394 let SchedRW = [WriteDouble, WriteSALU]; 395 let AsmMatchConverter = ""; 396 let FPDPRounding = 1; 397} 398} // End mayRaiseFPException = 0 399 400def V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>; 401 402let Constraints = "@earlyclobber $vdst" in { 403def V_MQSAD_PK_U16_U8 : VOP3Inst <"v_mqsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>; 404} // End Constraints = "@earlyclobber $vdst" 405 406def V_TRIG_PREOP_F64 : VOP3Inst <"v_trig_preop_f64", VOP3_Profile<VOP_F64_F64_I32>, int_amdgcn_trig_preop> { 407 let SchedRW = [WriteDouble]; 408} 409 410let SchedRW = [Write64Bit] in { 411let SubtargetPredicate = isGFX6GFX7 in { 412def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_I64_I64_I32>, shl>; 413def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_I64_I64_I32>, srl>; 414def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>, sra>; 415} // End SubtargetPredicate = isGFX6GFX7 416 417let SubtargetPredicate = isGFX8Plus in { 418def V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshl_rev>; 419def V_LSHRREV_B64 : VOP3Inst <"v_lshrrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshr_rev>; 420def V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile<VOP_I64_I32_I64>, ashr_rev>; 421} // End SubtargetPredicate = isGFX8Plus 422} // End SchedRW = [Write64Bit] 423 424def : GCNPat< 425 (i64 (getDivergentFrag<sext>.ret i16:$src)), 426 (REG_SEQUENCE VReg_64, 427 (i32 (V_BFE_I32 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))), sub0, 428 (i32 (COPY_TO_REGCLASS 429 (V_ASHRREV_I32_e32 (S_MOV_B32 (i32 0x1f)), (i32 (V_BFE_I32 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))) 430 ), VGPR_32)), sub1) 431>; 432 433def : GCNPat< 434 (i32 (getDivergentFrag<sext>.ret i16:$src)), 435 (i32 (V_BFE_I32 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))) 436>; 437 438let SubtargetPredicate = isGFX6GFX7GFX10 in { 439def V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>; 440} // End SubtargetPredicate = isGFX6GFX7GFX10 441 442let SchedRW = [Write32Bit] in { 443let SubtargetPredicate = isGFX8Plus in { 444def V_PERM_B32 : VOP3Inst <"v_perm_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUperm>; 445} // End SubtargetPredicate = isGFX8Plus 446} // End SchedRW = [Write32Bit] 447 448let SubtargetPredicate = isGFX7Plus in { 449 450let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in { 451def V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>; 452def V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOP3_Profile<VOP_V4I32_I64_I32_V4I32, VOP3_CLAMP>>; 453} // End Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] 454 455let isCommutable = 1 in { 456let SchedRW = [WriteQuarterRate32, WriteSALU] in { 457def V_MAD_U64_U32 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>; 458def V_MAD_I64_I32 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>; 459} // End SchedRW = [WriteQuarterRate32, WriteSALU] 460} // End isCommutable = 1 461 462} // End SubtargetPredicate = isGFX7Plus 463 464 465def V_DIV_FIXUP_F16 : VOP3Inst <"v_div_fixup_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, AMDGPUdiv_fixup> { 466 let Predicates = [Has16BitInsts, isGFX8Only]; 467 let FPDPRounding = 1; 468} 469def V_DIV_FIXUP_F16_gfx9 : VOP3Inst <"v_div_fixup_f16_gfx9", 470 VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUdiv_fixup> { 471 let renamedInGFX9 = 1; 472 let Predicates = [Has16BitInsts, isGFX9Plus]; 473 let FPDPRounding = 1; 474} 475 476def V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, any_fma> { 477 let Predicates = [Has16BitInsts, isGFX8Only]; 478 let FPDPRounding = 1; 479} 480def V_FMA_F16_gfx9 : VOP3Inst <"v_fma_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, any_fma> { 481 let renamedInGFX9 = 1; 482 let Predicates = [Has16BitInsts, isGFX9Plus]; 483 let FPDPRounding = 1; 484} 485 486let SubtargetPredicate = Has16BitInsts, isCommutable = 1 in { 487 488let renamedInGFX9 = 1 in { 489def V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>; 490def V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>; 491let FPDPRounding = 1 in { 492def V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fmad>; 493let Uses = [MODE, M0, EXEC] in { 494// For some reason the intrinsic operands are in a different order 495// from the instruction operands. 496def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>, 497 [(set f16:$vdst, 498 (int_amdgcn_interp_p2_f16 (VOP3Mods f32:$src2, i32:$src2_modifiers), 499 (VOP3Mods f32:$src0, i32:$src0_modifiers), 500 (i32 timm:$attrchan), 501 (i32 timm:$attr), 502 (i1 timm:$high), 503 M0))]>; 504} // End Uses = [M0, MODE, EXEC] 505} // End FPDPRounding = 1 506} // End renamedInGFX9 = 1 507 508let SubtargetPredicate = isGFX9Only in { 509def V_MAD_F16_gfx9 : VOP3Inst <"v_mad_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>> { 510 let FPDPRounding = 1; 511} 512} // End SubtargetPredicate = isGFX9Only 513 514let SubtargetPredicate = isGFX9Plus in { 515def V_MAD_U16_gfx9 : VOP3Inst <"v_mad_u16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>; 516def V_MAD_I16_gfx9 : VOP3Inst <"v_mad_i16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>; 517def V_INTERP_P2_F16_gfx9 : VOP3Interp <"v_interp_p2_f16_gfx9", VOP3_INTERP16<[f16, f32, i32, f32]>>; 518} // End SubtargetPredicate = isGFX9Plus 519 520let Uses = [MODE, M0, EXEC], FPDPRounding = 1 in { 521def V_INTERP_P1LL_F16 : VOP3Interp <"v_interp_p1ll_f16", VOP3_INTERP16<[f32, f32, i32, untyped]>, 522 [(set f32:$vdst, (int_amdgcn_interp_p1_f16 (VOP3Mods f32:$src0, i32:$src0_modifiers), 523 (i32 timm:$attrchan), 524 (i32 timm:$attr), 525 (i1 timm:$high), M0))]> { 526 // This predicate should only apply to the selection pattern. The 527 // instruction still exists and should decode on subtargets with 528 // other bank counts. 529 let OtherPredicates = [has32BankLDS]; 530} 531 532 533def V_INTERP_P1LV_F16 : VOP3Interp <"v_interp_p1lv_f16", VOP3_INTERP16<[f32, f32, i32, f16]>>; 534} // End Uses = [MODE, M0, EXEC], FPDPRounding = 1 535 536} // End SubtargetPredicate = Has16BitInsts, isCommutable = 1 537 538let SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC] in { 539def V_INTERP_P1_F32_e64 : VOP3Interp <"v_interp_p1_f32", VOP3_INTERP>; 540def V_INTERP_P2_F32_e64 : VOP3Interp <"v_interp_p2_f32", VOP3_INTERP>; 541def V_INTERP_MOV_F32_e64 : VOP3Interp <"v_interp_mov_f32", VOP3_INTERP_MOV>; 542} // End SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC] 543 544let Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] in { 545 546multiclass Ternary_i16_Pats <SDPatternOperator op1, SDPatternOperator op2, 547 Instruction inst, SDPatternOperator op3> { 548def : GCNPat < 549 (op2 (op1 i16:$src0, i16:$src1), i16:$src2), 550 (inst i16:$src0, i16:$src1, i16:$src2, (i1 0)) 551>; 552 553} 554 555defm: Ternary_i16_Pats<mul, add, V_MAD_U16, zext>; 556defm: Ternary_i16_Pats<mul, add, V_MAD_I16, sext>; 557 558} // End Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] 559 560let Predicates = [Has16BitInsts, isGFX10Plus] in { 561 562multiclass Ternary_i16_Pats_gfx9<SDPatternOperator op1, SDPatternOperator op2, 563 Instruction inst, SDPatternOperator op3> { 564def : GCNPat < 565 (op2 (op1 i16:$src0, i16:$src1), i16:$src2), 566 (inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1, SRCMODS.NONE, $src2, DSTCLAMP.NONE) 567>; 568 569} 570 571defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_U16_gfx9, zext>; 572defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_I16_gfx9, sext>; 573 574} // End Predicates = [Has16BitInsts, isGFX10Plus] 575 576class ThreeOpFrag<SDPatternOperator op1, SDPatternOperator op2> : PatFrag< 577 (ops node:$x, node:$y, node:$z), 578 // When the inner operation is used multiple times, selecting 3-op 579 // instructions may still be beneficial -- if the other users can be 580 // combined similarly. Let's be conservative for now. 581 (op2 (HasOneUseBinOp<op1> node:$x, node:$y), node:$z), 582 [{ 583 // Only use VALU ops when the result is divergent. 584 if (!N->isDivergent()) 585 return false; 586 587 // Check constant bus limitations. 588 // 589 // Note: Use !isDivergent as a conservative proxy for whether the value 590 // is in an SGPR (uniform values can end up in VGPRs as well). 591 unsigned ConstantBusUses = 0; 592 for (unsigned i = 0; i < 3; ++i) { 593 if (!Operands[i]->isDivergent() && 594 !isInlineImmediate(Operands[i].getNode())) { 595 ConstantBusUses++; 596 // This uses AMDGPU::V_ADD3_U32, but all three operand instructions 597 // have the same constant bus limit. 598 if (ConstantBusUses > Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32)) 599 return false; 600 } 601 } 602 603 return true; 604 }]> { 605 let PredicateCodeUsesOperands = 1; 606 607 // The divergence predicate is irrelevant in GlobalISel, as we have 608 // proper register bank checks. We also force all VOP instruction 609 // operands to VGPR, so we should not need to check the constant bus 610 // restriction. 611 // 612 // FIXME: With unlucky SGPR operands, we could penalize code by 613 // blocking folding SGPR->VGPR copies later. 614 // FIXME: There's no register bank verifier 615 // FIXME: Should add a way for the emitter to recognize this is a 616 // trivially true predicate to eliminate the check. 617 let GISelPredicateCode = [{return true;}]; 618} 619 620let SubtargetPredicate = isGFX9Plus in { 621def V_PACK_B32_F16 : VOP3Inst <"v_pack_b32_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>; 622def V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 623def V_ADD_LSHL_U32 : VOP3Inst <"v_add_lshl_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 624def V_ADD3_U32 : VOP3Inst <"v_add3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 625def V_LSHL_OR_B32 : VOP3Inst <"v_lshl_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 626def V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 627def V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 628 629def V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 630 631def V_MED3_F16 : VOP3Inst <"v_med3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmed3>; 632def V_MED3_I16 : VOP3Inst <"v_med3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmed3>; 633def V_MED3_U16 : VOP3Inst <"v_med3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumed3>; 634 635def V_MIN3_F16 : VOP3Inst <"v_min3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmin3>; 636def V_MIN3_I16 : VOP3Inst <"v_min3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmin3>; 637def V_MIN3_U16 : VOP3Inst <"v_min3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumin3>; 638 639def V_MAX3_F16 : VOP3Inst <"v_max3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmax3>; 640def V_MAX3_I16 : VOP3Inst <"v_max3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmax3>; 641def V_MAX3_U16 : VOP3Inst <"v_max3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumax3>; 642 643def V_ADD_I16 : VOP3Inst <"v_add_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>; 644def V_SUB_I16 : VOP3Inst <"v_sub_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>; 645 646def V_MAD_U32_U16 : VOP3Inst <"v_mad_u32_u16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>; 647def V_MAD_I32_I16 : VOP3Inst <"v_mad_i32_i16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>; 648 649def V_CVT_PKNORM_I16_F16 : VOP3Inst <"v_cvt_pknorm_i16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>; 650def V_CVT_PKNORM_U16_F16 : VOP3Inst <"v_cvt_pknorm_u16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>; 651 652def V_ADD_I32_gfx9 : VOP3Inst <"v_add_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32_ARITH>>; 653def V_SUB_I32_gfx9 : VOP3Inst <"v_sub_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32_ARITH>>; 654 655 656class ThreeOp_i32_Pats <SDPatternOperator op1, SDPatternOperator op2, Instruction inst> : GCNPat < 657 // This matches (op2 (op1 i32:$src0, i32:$src1), i32:$src2) with conditions. 658 (ThreeOpFrag<op1, op2> i32:$src0, i32:$src1, i32:$src2), 659 (inst VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2) 660>; 661 662def : ThreeOp_i32_Pats<shl, add, V_LSHL_ADD_U32>; 663def : ThreeOp_i32_Pats<add, shl, V_ADD_LSHL_U32>; 664def : ThreeOp_i32_Pats<add, add, V_ADD3_U32>; 665def : ThreeOp_i32_Pats<shl, or, V_LSHL_OR_B32>; 666def : ThreeOp_i32_Pats<and, or, V_AND_OR_B32>; 667def : ThreeOp_i32_Pats<or, or, V_OR3_B32>; 668def : ThreeOp_i32_Pats<xor, add, V_XAD_U32>; 669 670} // End SubtargetPredicate = isGFX9Plus 671 672def VOP3_PERMLANE_Profile : VOP3_Profile<VOPProfile <[i32, i32, i32, i32]>, VOP3_OPSEL> { 673 let Src0RC64 = VRegSrc_32; 674 let Src1RC64 = SCSrc_b32; 675 let Src2RC64 = SCSrc_b32; 676 let InsVOP3OpSel = (ins IntOpSelMods:$src0_modifiers, VRegSrc_32:$src0, 677 IntOpSelMods:$src1_modifiers, SCSrc_b32:$src1, 678 IntOpSelMods:$src2_modifiers, SCSrc_b32:$src2, 679 VGPR_32:$vdst_in, op_sel:$op_sel); 680 let HasClamp = 0; 681 let HasOMod = 0; 682} 683 684class PermlanePat<SDPatternOperator permlane, 685 Instruction inst> : GCNPat< 686 (permlane i32:$vdst_in, i32:$src0, i32:$src1, i32:$src2, 687 timm:$fi, timm:$bc), 688 (inst (as_i1timm $fi), VGPR_32:$src0, (as_i1timm $bc), 689 SCSrc_b32:$src1, 0, SCSrc_b32:$src2, VGPR_32:$vdst_in) 690>; 691 692// Permlane intrinsic that has either fetch invalid or bound control 693// fields enabled. 694class BoundControlOrFetchInvalidPermlane<SDPatternOperator permlane> : 695 PatFrag<(ops node:$vdst_in, node:$src0, node:$src1, node:$src2, 696 node:$fi, node:$bc), 697 (permlane node:$vdst_in, node:$src0, node: 698 $src1, node:$src2, node:$fi, node:$bc)> { 699 let PredicateCode = [{ return N->getConstantOperandVal(5) != 0 || 700 N->getConstantOperandVal(6) != 0; }]; 701 let GISelPredicateCode = [{ 702 return MI.getOperand(6).getImm() != 0 || 703 MI.getOperand(7).getImm() != 0; 704 }]; 705} 706 707// Drop the input value if it won't be read. 708class PermlaneDiscardVDstIn<SDPatternOperator permlane, 709 Instruction inst> : GCNPat< 710 (permlane srcvalue, i32:$src0, i32:$src1, i32:$src2, 711 timm:$fi, timm:$bc), 712 (inst (as_i1timm $fi), VGPR_32:$src0, (as_i1timm $bc), 713 SCSrc_b32:$src1, 0, SCSrc_b32:$src2, 714 (IMPLICIT_DEF)) 715>; 716 717 718let SubtargetPredicate = isGFX10Plus in { 719 def V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>; 720 def : ThreeOp_i32_Pats<xor, xor, V_XOR3_B32>; 721 722 let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in { 723 def V_PERMLANE16_B32 : VOP3Inst <"v_permlane16_b32", VOP3_PERMLANE_Profile>; 724 def V_PERMLANEX16_B32 : VOP3Inst <"v_permlanex16_b32", VOP3_PERMLANE_Profile>; 725 } // End $vdst = $vdst_in, DisableEncoding $vdst_in 726 727 def : PermlanePat<int_amdgcn_permlane16, V_PERMLANE16_B32>; 728 def : PermlanePat<int_amdgcn_permlanex16, V_PERMLANEX16_B32>; 729 730 def : PermlaneDiscardVDstIn< 731 BoundControlOrFetchInvalidPermlane<int_amdgcn_permlane16>, 732 V_PERMLANE16_B32>; 733 def : PermlaneDiscardVDstIn< 734 BoundControlOrFetchInvalidPermlane<int_amdgcn_permlanex16>, 735 V_PERMLANEX16_B32>; 736} // End SubtargetPredicate = isGFX10Plus 737 738class DivFmasPat<ValueType vt, Instruction inst, Register CondReg> : GCNPat< 739 (AMDGPUdiv_fmas (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)), 740 (vt (VOP3Mods vt:$src1, i32:$src1_modifiers)), 741 (vt (VOP3Mods vt:$src2, i32:$src2_modifiers)), 742 (i1 CondReg)), 743 (inst $src0_modifiers, $src0, $src1_modifiers, $src1, $src2_modifiers, $src2) 744>; 745 746let WaveSizePredicate = isWave64 in { 747def : DivFmasPat<f32, V_DIV_FMAS_F32, VCC>; 748def : DivFmasPat<f64, V_DIV_FMAS_F64, VCC>; 749} 750 751let WaveSizePredicate = isWave32 in { 752def : DivFmasPat<f32, V_DIV_FMAS_F32, VCC_LO>; 753def : DivFmasPat<f64, V_DIV_FMAS_F64, VCC_LO>; 754} 755 756//===----------------------------------------------------------------------===// 757// Integer Clamp Patterns 758//===----------------------------------------------------------------------===// 759 760class getClampPat<VOPProfile P, SDPatternOperator node> { 761 dag ret3 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2)); 762 dag ret2 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1)); 763 dag ret1 = (P.DstVT (node P.Src0VT:$src0)); 764 dag ret = !if(!eq(P.NumSrcArgs, 3), ret3, 765 !if(!eq(P.NumSrcArgs, 2), ret2, 766 ret1)); 767} 768 769class getClampRes<VOPProfile P, Instruction inst> { 770 dag ret3 = (inst P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, (i1 0)); 771 dag ret2 = (inst P.Src0VT:$src0, P.Src1VT:$src1, (i1 0)); 772 dag ret1 = (inst P.Src0VT:$src0, (i1 0)); 773 dag ret = !if(!eq(P.NumSrcArgs, 3), ret3, 774 !if(!eq(P.NumSrcArgs, 2), ret2, 775 ret1)); 776} 777 778class IntClampPat<VOP3Inst inst, SDPatternOperator node> : GCNPat< 779 getClampPat<inst.Pfl, node>.ret, 780 getClampRes<inst.Pfl, inst>.ret 781>; 782 783def : IntClampPat<V_MAD_I32_I24, AMDGPUmad_i24>; 784def : IntClampPat<V_MAD_U32_U24, AMDGPUmad_u24>; 785 786def : IntClampPat<V_SAD_U8, int_amdgcn_sad_u8>; 787def : IntClampPat<V_SAD_HI_U8, int_amdgcn_sad_hi_u8>; 788def : IntClampPat<V_SAD_U16, int_amdgcn_sad_u16>; 789 790def : IntClampPat<V_MSAD_U8, int_amdgcn_msad_u8>; 791def : IntClampPat<V_MQSAD_PK_U16_U8, int_amdgcn_mqsad_pk_u16_u8>; 792 793def : IntClampPat<V_QSAD_PK_U16_U8, int_amdgcn_qsad_pk_u16_u8>; 794def : IntClampPat<V_MQSAD_U32_U8, int_amdgcn_mqsad_u32_u8>; 795 796 797//===----------------------------------------------------------------------===// 798// Target-specific instruction encodings. 799//===----------------------------------------------------------------------===// 800 801//===----------------------------------------------------------------------===// 802// GFX10. 803//===----------------------------------------------------------------------===// 804 805let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in { 806 multiclass VOP3_Real_gfx10<bits<10> op> { 807 def _gfx10 : 808 VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.GFX10>, 809 VOP3e_gfx10<op, !cast<VOP_Pseudo>(NAME).Pfl>; 810 } 811 multiclass VOP3_Real_gfx10_with_name<bits<10> op, string opName, 812 string asmName> { 813 def _gfx10 : 814 VOP3_Real<!cast<VOP3_Pseudo>(opName), SIEncodingFamily.GFX10>, 815 VOP3e_gfx10<op, !cast<VOP3_Pseudo>(opName).Pfl> { 816 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName); 817 let AsmString = asmName # ps.AsmOperands; 818 } 819 } 820 multiclass VOP3be_Real_gfx10<bits<10> op> { 821 def _gfx10 : 822 VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>, 823 VOP3be_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>; 824 } 825 multiclass VOP3Interp_Real_gfx10<bits<10> op> { 826 def _gfx10 : 827 VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>, 828 VOP3Interp_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>; 829 } 830 multiclass VOP3OpSel_Real_gfx10<bits<10> op> { 831 def _gfx10 : 832 VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>, 833 VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>; 834 } 835 multiclass VOP3OpSel_Real_gfx10_with_name<bits<10> op, string opName, 836 string asmName> { 837 def _gfx10 : 838 VOP3_Real<!cast<VOP3_Pseudo>(opName), SIEncodingFamily.GFX10>, 839 VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(opName).Pfl> { 840 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName); 841 let AsmString = asmName # ps.AsmOperands; 842 } 843 } 844} // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" 845 846defm V_READLANE_B32 : VOP3_Real_gfx10<0x360>; 847 848let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) in { 849 defm V_WRITELANE_B32 : VOP3_Real_gfx10<0x361>; 850} // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) 851 852defm V_XOR3_B32 : VOP3_Real_gfx10<0x178>; 853defm V_LSHLREV_B64 : VOP3_Real_gfx10<0x2ff>; 854defm V_LSHRREV_B64 : VOP3_Real_gfx10<0x300>; 855defm V_ASHRREV_I64 : VOP3_Real_gfx10<0x301>; 856defm V_PERM_B32 : VOP3_Real_gfx10<0x344>; 857defm V_XAD_U32 : VOP3_Real_gfx10<0x345>; 858defm V_LSHL_ADD_U32 : VOP3_Real_gfx10<0x346>; 859defm V_ADD_LSHL_U32 : VOP3_Real_gfx10<0x347>; 860defm V_ADD3_U32 : VOP3_Real_gfx10<0x36d>; 861defm V_LSHL_OR_B32 : VOP3_Real_gfx10<0x36f>; 862defm V_AND_OR_B32 : VOP3_Real_gfx10<0x371>; 863defm V_OR3_B32 : VOP3_Real_gfx10<0x372>; 864 865// TODO-GFX10: add MC tests for v_add/sub_nc_i16 866defm V_ADD_NC_I16 : 867 VOP3OpSel_Real_gfx10_with_name<0x30d, "V_ADD_I16", "v_add_nc_i16">; 868defm V_SUB_NC_I16 : 869 VOP3OpSel_Real_gfx10_with_name<0x30e, "V_SUB_I16", "v_sub_nc_i16">; 870defm V_SUB_NC_I32 : 871 VOP3_Real_gfx10_with_name<0x376, "V_SUB_I32_gfx9", "v_sub_nc_i32">; 872defm V_ADD_NC_I32 : 873 VOP3_Real_gfx10_with_name<0x37f, "V_ADD_I32_gfx9", "v_add_nc_i32">; 874 875defm V_INTERP_P1_F32_e64 : VOP3Interp_Real_gfx10<0x200>; 876defm V_INTERP_P2_F32_e64 : VOP3Interp_Real_gfx10<0x201>; 877defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_gfx10<0x202>; 878 879defm V_INTERP_P1LL_F16 : VOP3Interp_Real_gfx10<0x342>; 880defm V_INTERP_P1LV_F16 : VOP3Interp_Real_gfx10<0x343>; 881defm V_INTERP_P2_F16 : VOP3Interp_Real_gfx10<0x35a>; 882 883defm V_PACK_B32_F16 : VOP3OpSel_Real_gfx10<0x311>; 884defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx10<0x312>; 885defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx10<0x313>; 886 887defm V_MIN3_F16 : VOP3OpSel_Real_gfx10<0x351>; 888defm V_MIN3_I16 : VOP3OpSel_Real_gfx10<0x352>; 889defm V_MIN3_U16 : VOP3OpSel_Real_gfx10<0x353>; 890defm V_MAX3_F16 : VOP3OpSel_Real_gfx10<0x354>; 891defm V_MAX3_I16 : VOP3OpSel_Real_gfx10<0x355>; 892defm V_MAX3_U16 : VOP3OpSel_Real_gfx10<0x356>; 893defm V_MED3_F16 : VOP3OpSel_Real_gfx10<0x357>; 894defm V_MED3_I16 : VOP3OpSel_Real_gfx10<0x358>; 895defm V_MED3_U16 : VOP3OpSel_Real_gfx10<0x359>; 896defm V_MAD_U32_U16 : VOP3OpSel_Real_gfx10<0x373>; 897defm V_MAD_I32_I16 : VOP3OpSel_Real_gfx10<0x375>; 898 899defm V_MAD_U16 : 900 VOP3OpSel_Real_gfx10_with_name<0x340, "V_MAD_U16_gfx9", "v_mad_u16">; 901defm V_FMA_F16 : 902 VOP3OpSel_Real_gfx10_with_name<0x34b, "V_FMA_F16_gfx9", "v_fma_f16">; 903defm V_MAD_I16 : 904 VOP3OpSel_Real_gfx10_with_name<0x35e, "V_MAD_I16_gfx9", "v_mad_i16">; 905defm V_DIV_FIXUP_F16 : 906 VOP3OpSel_Real_gfx10_with_name<0x35f, "V_DIV_FIXUP_F16_gfx9", "v_div_fixup_f16">; 907 908// FIXME-GFX10-OPSEL: Need to add "selective" opsel support to some of these 909// (they do not support SDWA or DPP). 910defm V_ADD_NC_U16 : VOP3_Real_gfx10_with_name<0x303, "V_ADD_U16_e64", "v_add_nc_u16">; 911defm V_SUB_NC_U16 : VOP3_Real_gfx10_with_name<0x304, "V_SUB_U16_e64", "v_sub_nc_u16">; 912defm V_MUL_LO_U16 : VOP3_Real_gfx10_with_name<0x305, "V_MUL_LO_U16_e64", "v_mul_lo_u16">; 913defm V_LSHRREV_B16 : VOP3_Real_gfx10_with_name<0x307, "V_LSHRREV_B16_e64", "v_lshrrev_b16">; 914defm V_ASHRREV_I16 : VOP3_Real_gfx10_with_name<0x308, "V_ASHRREV_I16_e64", "v_ashrrev_i16">; 915defm V_MAX_U16 : VOP3_Real_gfx10_with_name<0x309, "V_MAX_U16_e64", "v_max_u16">; 916defm V_MAX_I16 : VOP3_Real_gfx10_with_name<0x30a, "V_MAX_I16_e64", "v_max_i16">; 917defm V_MIN_U16 : VOP3_Real_gfx10_with_name<0x30b, "V_MIN_U16_e64", "v_min_u16">; 918defm V_MIN_I16 : VOP3_Real_gfx10_with_name<0x30c, "V_MIN_I16_e64", "v_min_i16">; 919defm V_LSHLREV_B16 : VOP3_Real_gfx10_with_name<0x314, "V_LSHLREV_B16_e64", "v_lshlrev_b16">; 920defm V_PERMLANE16_B32 : VOP3OpSel_Real_gfx10<0x377>; 921defm V_PERMLANEX16_B32 : VOP3OpSel_Real_gfx10<0x378>; 922 923//===----------------------------------------------------------------------===// 924// GFX7, GFX10. 925//===----------------------------------------------------------------------===// 926 927let AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7" in { 928 multiclass VOP3_Real_gfx7<bits<10> op> { 929 def _gfx7 : 930 VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>, 931 VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>; 932 } 933 multiclass VOP3be_Real_gfx7<bits<10> op> { 934 def _gfx7 : 935 VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>, 936 VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>; 937 } 938} // End AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7" 939 940multiclass VOP3_Real_gfx7_gfx10<bits<10> op> : 941 VOP3_Real_gfx7<op>, VOP3_Real_gfx10<op>; 942 943multiclass VOP3be_Real_gfx7_gfx10<bits<10> op> : 944 VOP3be_Real_gfx7<op>, VOP3be_Real_gfx10<op>; 945 946defm V_QSAD_PK_U16_U8 : VOP3_Real_gfx7_gfx10<0x172>; 947defm V_MQSAD_U32_U8 : VOP3_Real_gfx7_gfx10<0x175>; 948defm V_MAD_U64_U32 : VOP3be_Real_gfx7_gfx10<0x176>; 949defm V_MAD_I64_I32 : VOP3be_Real_gfx7_gfx10<0x177>; 950 951//===----------------------------------------------------------------------===// 952// GFX6, GFX7, GFX10. 953//===----------------------------------------------------------------------===// 954 955let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in { 956 multiclass VOP3_Real_gfx6_gfx7<bits<10> op> { 957 def _gfx6_gfx7 : 958 VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>, 959 VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>; 960 } 961 multiclass VOP3be_Real_gfx6_gfx7<bits<10> op> { 962 def _gfx6_gfx7 : 963 VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>, 964 VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>; 965 } 966} // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" 967 968multiclass VOP3_Real_gfx6_gfx7_gfx10<bits<10> op> : 969 VOP3_Real_gfx6_gfx7<op>, VOP3_Real_gfx10<op>; 970 971multiclass VOP3be_Real_gfx6_gfx7_gfx10<bits<10> op> : 972 VOP3be_Real_gfx6_gfx7<op>, VOP3be_Real_gfx10<op>; 973 974defm V_LSHL_B64 : VOP3_Real_gfx6_gfx7<0x161>; 975defm V_LSHR_B64 : VOP3_Real_gfx6_gfx7<0x162>; 976defm V_ASHR_I64 : VOP3_Real_gfx6_gfx7<0x163>; 977 978defm V_MAD_LEGACY_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x140>; 979defm V_MAD_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x141>; 980defm V_MAD_I32_I24 : VOP3_Real_gfx6_gfx7_gfx10<0x142>; 981defm V_MAD_U32_U24 : VOP3_Real_gfx6_gfx7_gfx10<0x143>; 982defm V_CUBEID_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x144>; 983defm V_CUBESC_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x145>; 984defm V_CUBETC_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x146>; 985defm V_CUBEMA_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x147>; 986defm V_BFE_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x148>; 987defm V_BFE_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x149>; 988defm V_BFI_B32 : VOP3_Real_gfx6_gfx7_gfx10<0x14a>; 989defm V_FMA_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x14b>; 990defm V_FMA_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x14c>; 991defm V_LERP_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x14d>; 992defm V_ALIGNBIT_B32 : VOP3_Real_gfx6_gfx7_gfx10<0x14e>; 993defm V_ALIGNBYTE_B32 : VOP3_Real_gfx6_gfx7_gfx10<0x14f>; 994defm V_MULLIT_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x150>; 995defm V_MIN3_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x151>; 996defm V_MIN3_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x152>; 997defm V_MIN3_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x153>; 998defm V_MAX3_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x154>; 999defm V_MAX3_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x155>; 1000defm V_MAX3_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x156>; 1001defm V_MED3_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x157>; 1002defm V_MED3_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x158>; 1003defm V_MED3_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x159>; 1004defm V_SAD_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x15a>; 1005defm V_SAD_HI_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x15b>; 1006defm V_SAD_U16 : VOP3_Real_gfx6_gfx7_gfx10<0x15c>; 1007defm V_SAD_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x15d>; 1008defm V_CVT_PK_U8_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x15e>; 1009defm V_DIV_FIXUP_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x15f>; 1010defm V_DIV_FIXUP_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x160>; 1011defm V_ADD_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x164>; 1012defm V_MUL_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x165>; 1013defm V_MIN_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x166>; 1014defm V_MAX_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x167>; 1015defm V_LDEXP_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x168>; 1016defm V_MUL_LO_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x169>; 1017defm V_MUL_HI_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x16a>; 1018defm V_MUL_LO_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x16b>; 1019defm V_MUL_HI_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x16c>; 1020defm V_DIV_FMAS_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x16f>; 1021defm V_DIV_FMAS_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x170>; 1022defm V_MSAD_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x171>; 1023defm V_MQSAD_PK_U16_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x173>; 1024defm V_TRIG_PREOP_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x174>; 1025defm V_DIV_SCALE_F32 : VOP3be_Real_gfx6_gfx7_gfx10<0x16d>; 1026defm V_DIV_SCALE_F64 : VOP3be_Real_gfx6_gfx7_gfx10<0x16e>; 1027 1028// NB: Same opcode as v_mad_legacy_f32 1029let DecoderNamespace = "GFX10_B" in 1030defm V_FMA_LEGACY_F32 : VOP3_Real_gfx10<0x140>; 1031 1032//===----------------------------------------------------------------------===// 1033// GFX8, GFX9 (VI). 1034//===----------------------------------------------------------------------===// 1035 1036let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in { 1037 1038multiclass VOP3_Real_vi<bits<10> op> { 1039 def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>, 1040 VOP3e_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>; 1041} 1042 1043multiclass VOP3be_Real_vi<bits<10> op> { 1044 def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>, 1045 VOP3be_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>; 1046} 1047 1048multiclass VOP3OpSel_Real_gfx9<bits<10> op> { 1049 def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>, 1050 VOP3OpSel_gfx9 <op, !cast<VOP_Pseudo>(NAME).Pfl>; 1051} 1052 1053multiclass VOP3Interp_Real_vi<bits<10> op> { 1054 def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>, 1055 VOP3Interp_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>; 1056} 1057 1058} // End AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" 1059 1060let AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" in { 1061 1062multiclass VOP3_F16_Real_vi<bits<10> op> { 1063 def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>, 1064 VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl>; 1065} 1066 1067multiclass VOP3Interp_F16_Real_vi<bits<10> op> { 1068 def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>, 1069 VOP3Interp_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl>; 1070} 1071 1072} // End AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" 1073 1074let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in { 1075 1076multiclass VOP3_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> { 1077 def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName), SIEncodingFamily.GFX9>, 1078 VOP3e_vi <op, !cast<VOP3_Pseudo>(OpName).Pfl> { 1079 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName); 1080 let AsmString = AsmName # ps.AsmOperands; 1081 } 1082} 1083 1084multiclass VOP3OpSel_F16_Real_gfx9<bits<10> op, string AsmName> { 1085 def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX9>, 1086 VOP3OpSel_gfx9 <op, !cast<VOP3_Pseudo>(NAME).Pfl> { 1087 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(NAME); 1088 let AsmString = AsmName # ps.AsmOperands; 1089 } 1090} 1091 1092multiclass VOP3Interp_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> { 1093 def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName), SIEncodingFamily.GFX9>, 1094 VOP3Interp_vi <op, !cast<VOP3_Pseudo>(OpName).Pfl> { 1095 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName); 1096 let AsmString = AsmName # ps.AsmOperands; 1097 } 1098} 1099 1100multiclass VOP3_Real_gfx9<bits<10> op, string AsmName> { 1101 def _gfx9 : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.GFX9>, 1102 VOP3e_vi <op, !cast<VOP_Pseudo>(NAME).Pfl> { 1103 VOP_Pseudo ps = !cast<VOP_Pseudo>(NAME); 1104 let AsmString = AsmName # ps.AsmOperands; 1105 } 1106} 1107 1108} // End AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" 1109 1110defm V_MAD_U64_U32 : VOP3be_Real_vi <0x1E8>; 1111defm V_MAD_I64_I32 : VOP3be_Real_vi <0x1E9>; 1112 1113defm V_MAD_LEGACY_F32 : VOP3_Real_vi <0x1c0>; 1114defm V_MAD_F32 : VOP3_Real_vi <0x1c1>; 1115defm V_MAD_I32_I24 : VOP3_Real_vi <0x1c2>; 1116defm V_MAD_U32_U24 : VOP3_Real_vi <0x1c3>; 1117defm V_CUBEID_F32 : VOP3_Real_vi <0x1c4>; 1118defm V_CUBESC_F32 : VOP3_Real_vi <0x1c5>; 1119defm V_CUBETC_F32 : VOP3_Real_vi <0x1c6>; 1120defm V_CUBEMA_F32 : VOP3_Real_vi <0x1c7>; 1121defm V_BFE_U32 : VOP3_Real_vi <0x1c8>; 1122defm V_BFE_I32 : VOP3_Real_vi <0x1c9>; 1123defm V_BFI_B32 : VOP3_Real_vi <0x1ca>; 1124defm V_FMA_F32 : VOP3_Real_vi <0x1cb>; 1125defm V_FMA_F64 : VOP3_Real_vi <0x1cc>; 1126defm V_LERP_U8 : VOP3_Real_vi <0x1cd>; 1127defm V_ALIGNBIT_B32 : VOP3_Real_vi <0x1ce>; 1128defm V_ALIGNBYTE_B32 : VOP3_Real_vi <0x1cf>; 1129defm V_MIN3_F32 : VOP3_Real_vi <0x1d0>; 1130defm V_MIN3_I32 : VOP3_Real_vi <0x1d1>; 1131defm V_MIN3_U32 : VOP3_Real_vi <0x1d2>; 1132defm V_MAX3_F32 : VOP3_Real_vi <0x1d3>; 1133defm V_MAX3_I32 : VOP3_Real_vi <0x1d4>; 1134defm V_MAX3_U32 : VOP3_Real_vi <0x1d5>; 1135defm V_MED3_F32 : VOP3_Real_vi <0x1d6>; 1136defm V_MED3_I32 : VOP3_Real_vi <0x1d7>; 1137defm V_MED3_U32 : VOP3_Real_vi <0x1d8>; 1138defm V_SAD_U8 : VOP3_Real_vi <0x1d9>; 1139defm V_SAD_HI_U8 : VOP3_Real_vi <0x1da>; 1140defm V_SAD_U16 : VOP3_Real_vi <0x1db>; 1141defm V_SAD_U32 : VOP3_Real_vi <0x1dc>; 1142defm V_CVT_PK_U8_F32 : VOP3_Real_vi <0x1dd>; 1143defm V_DIV_FIXUP_F32 : VOP3_Real_vi <0x1de>; 1144defm V_DIV_FIXUP_F64 : VOP3_Real_vi <0x1df>; 1145defm V_DIV_SCALE_F32 : VOP3be_Real_vi <0x1e0>; 1146defm V_DIV_SCALE_F64 : VOP3be_Real_vi <0x1e1>; 1147defm V_DIV_FMAS_F32 : VOP3_Real_vi <0x1e2>; 1148defm V_DIV_FMAS_F64 : VOP3_Real_vi <0x1e3>; 1149defm V_MSAD_U8 : VOP3_Real_vi <0x1e4>; 1150defm V_QSAD_PK_U16_U8 : VOP3_Real_vi <0x1e5>; 1151defm V_MQSAD_PK_U16_U8 : VOP3_Real_vi <0x1e6>; 1152defm V_MQSAD_U32_U8 : VOP3_Real_vi <0x1e7>; 1153 1154defm V_PERM_B32 : VOP3_Real_vi <0x1ed>; 1155 1156defm V_MAD_F16 : VOP3_F16_Real_vi <0x1ea>; 1157defm V_MAD_U16 : VOP3_F16_Real_vi <0x1eb>; 1158defm V_MAD_I16 : VOP3_F16_Real_vi <0x1ec>; 1159defm V_FMA_F16 : VOP3_F16_Real_vi <0x1ee>; 1160defm V_DIV_FIXUP_F16 : VOP3_F16_Real_vi <0x1ef>; 1161defm V_INTERP_P2_F16 : VOP3Interp_F16_Real_vi <0x276>; 1162 1163let FPDPRounding = 1 in { 1164defm V_MAD_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ea, "V_MAD_F16", "v_mad_legacy_f16">; 1165defm V_FMA_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ee, "V_FMA_F16", "v_fma_legacy_f16">; 1166defm V_DIV_FIXUP_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ef, "V_DIV_FIXUP_F16", "v_div_fixup_legacy_f16">; 1167defm V_INTERP_P2_LEGACY_F16 : VOP3Interp_F16_Real_gfx9 <0x276, "V_INTERP_P2_F16", "v_interp_p2_legacy_f16">; 1168} // End FPDPRounding = 1 1169 1170defm V_MAD_LEGACY_U16 : VOP3_F16_Real_gfx9 <0x1eb, "V_MAD_U16", "v_mad_legacy_u16">; 1171defm V_MAD_LEGACY_I16 : VOP3_F16_Real_gfx9 <0x1ec, "V_MAD_I16", "v_mad_legacy_i16">; 1172 1173defm V_MAD_F16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x203, "v_mad_f16">; 1174defm V_MAD_U16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x204, "v_mad_u16">; 1175defm V_MAD_I16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x205, "v_mad_i16">; 1176defm V_FMA_F16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x206, "v_fma_f16">; 1177defm V_DIV_FIXUP_F16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x207, "v_div_fixup_f16">; 1178defm V_INTERP_P2_F16_gfx9 : VOP3Interp_F16_Real_gfx9 <0x277, "V_INTERP_P2_F16_gfx9", "v_interp_p2_f16">; 1179 1180defm V_ADD_I32_gfx9 : VOP3_Real_gfx9 <0x29c, "v_add_i32">; 1181defm V_SUB_I32_gfx9 : VOP3_Real_gfx9 <0x29d, "v_sub_i32">; 1182 1183defm V_INTERP_P1_F32_e64 : VOP3Interp_Real_vi <0x270>; 1184defm V_INTERP_P2_F32_e64 : VOP3Interp_Real_vi <0x271>; 1185defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_vi <0x272>; 1186 1187defm V_INTERP_P1LL_F16 : VOP3Interp_Real_vi <0x274>; 1188defm V_INTERP_P1LV_F16 : VOP3Interp_Real_vi <0x275>; 1189defm V_ADD_F64 : VOP3_Real_vi <0x280>; 1190defm V_MUL_F64 : VOP3_Real_vi <0x281>; 1191defm V_MIN_F64 : VOP3_Real_vi <0x282>; 1192defm V_MAX_F64 : VOP3_Real_vi <0x283>; 1193defm V_LDEXP_F64 : VOP3_Real_vi <0x284>; 1194defm V_MUL_LO_U32 : VOP3_Real_vi <0x285>; 1195 1196// removed from VI as identical to V_MUL_LO_U32 1197let isAsmParserOnly = 1 in { 1198defm V_MUL_LO_I32 : VOP3_Real_vi <0x285>; 1199} 1200 1201defm V_MUL_HI_U32 : VOP3_Real_vi <0x286>; 1202defm V_MUL_HI_I32 : VOP3_Real_vi <0x287>; 1203 1204defm V_READLANE_B32 : VOP3_Real_vi <0x289>; 1205defm V_WRITELANE_B32 : VOP3_Real_vi <0x28a>; 1206 1207defm V_LSHLREV_B64 : VOP3_Real_vi <0x28f>; 1208defm V_LSHRREV_B64 : VOP3_Real_vi <0x290>; 1209defm V_ASHRREV_I64 : VOP3_Real_vi <0x291>; 1210defm V_TRIG_PREOP_F64 : VOP3_Real_vi <0x292>; 1211 1212defm V_LSHL_ADD_U32 : VOP3_Real_vi <0x1fd>; 1213defm V_ADD_LSHL_U32 : VOP3_Real_vi <0x1fe>; 1214defm V_ADD3_U32 : VOP3_Real_vi <0x1ff>; 1215defm V_LSHL_OR_B32 : VOP3_Real_vi <0x200>; 1216defm V_AND_OR_B32 : VOP3_Real_vi <0x201>; 1217defm V_OR3_B32 : VOP3_Real_vi <0x202>; 1218defm V_PACK_B32_F16 : VOP3OpSel_Real_gfx9 <0x2a0>; 1219 1220defm V_XAD_U32 : VOP3_Real_vi <0x1f3>; 1221 1222defm V_MIN3_F16 : VOP3OpSel_Real_gfx9 <0x1f4>; 1223defm V_MIN3_I16 : VOP3OpSel_Real_gfx9 <0x1f5>; 1224defm V_MIN3_U16 : VOP3OpSel_Real_gfx9 <0x1f6>; 1225 1226defm V_MAX3_F16 : VOP3OpSel_Real_gfx9 <0x1f7>; 1227defm V_MAX3_I16 : VOP3OpSel_Real_gfx9 <0x1f8>; 1228defm V_MAX3_U16 : VOP3OpSel_Real_gfx9 <0x1f9>; 1229 1230defm V_MED3_F16 : VOP3OpSel_Real_gfx9 <0x1fa>; 1231defm V_MED3_I16 : VOP3OpSel_Real_gfx9 <0x1fb>; 1232defm V_MED3_U16 : VOP3OpSel_Real_gfx9 <0x1fc>; 1233 1234defm V_ADD_I16 : VOP3OpSel_Real_gfx9 <0x29e>; 1235defm V_SUB_I16 : VOP3OpSel_Real_gfx9 <0x29f>; 1236 1237defm V_MAD_U32_U16 : VOP3OpSel_Real_gfx9 <0x1f1>; 1238defm V_MAD_I32_I16 : VOP3OpSel_Real_gfx9 <0x1f2>; 1239 1240defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx9 <0x299>; 1241defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx9 <0x29a>; 1242