//===-- SIInstrInfo.td -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

def isWave32 : Predicate<"Subtarget->getWavefrontSize() == 32">,
  AssemblerPredicate <(all_of FeatureWavefrontSize32)>;
def isWave64 : Predicate<"Subtarget->getWavefrontSize() == 64">,
  AssemblerPredicate <(all_of FeatureWavefrontSize64)>;

class GCNPredicateControl : PredicateControl {
  Predicate SIAssemblerPredicate = isGFX6GFX7;
  Predicate VIAssemblerPredicate = isGFX8GFX9;
}

// Except for the NONE field, this must be kept in sync with the
// SIEncodingFamily enum in SIInstrInfo.cpp and the columns of the
// getMCOpcodeGen table.
def SIEncodingFamily {
  int NONE = -1;
  int SI = 0;
  int VI = 1;
  int SDWA = 2;
  int SDWA9 = 3;
  int GFX80 = 4;
  int GFX9 = 5;
  int GFX10 = 6;
  int SDWA10 = 7;
  int GFX90A = 8;
  int GFX940 = 9;
  int GFX11 = 10;
  int GFX12 = 11;
}

//===----------------------------------------------------------------------===//
// SI DAG Nodes
//===----------------------------------------------------------------------===//

def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPUnaryOp>;

def SDTSBufferLoad : SDTypeProfile<1, 3,
  [                    // vdata
   SDTCisVT<1, v4i32>, // rsrc
   SDTCisVT<2, i32>,   // offset(imm)
   SDTCisVT<3, i32>]>; // cachepolicy

def SIsbuffer_load : SDNode<"AMDGPUISD::SBUFFER_LOAD", SDTSBufferLoad,
                            [SDNPMayLoad, SDNPMemOperand]>;

def SIsbuffer_load_byte : SDNode<"AMDGPUISD::SBUFFER_LOAD_BYTE", SDTSBufferLoad,
                                 [SDNPMayLoad, SDNPMemOperand]>;

def SIsbuffer_load_ubyte
    : SDNode<"AMDGPUISD::SBUFFER_LOAD_UBYTE", SDTSBufferLoad,
             [SDNPMayLoad, SDNPMemOperand]>;

def SIsbuffer_load_short
    : SDNode<"AMDGPUISD::SBUFFER_LOAD_SHORT", SDTSBufferLoad,
             [SDNPMayLoad, SDNPMemOperand]>;

def SIsbuffer_load_ushort
    : SDNode<"AMDGPUISD::SBUFFER_LOAD_USHORT", SDTSBufferLoad,
             [SDNPMayLoad, SDNPMemOperand]>;

def SIds_ordered_count : SDNode<"AMDGPUISD::DS_ORDERED_COUNT",
  SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i16>]>,
  [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain, SDNPInGlue]
>;

def SDTAtomic2_f32 : SDTypeProfile<1, 2, [
  SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
]>;

def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32,
                    [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
>;

def SIatomic_fmax : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMAX", SDTAtomic2_f32,
                    [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
>;

// load_d16_{lo|hi} ptr, tied_input
def SIload_d16 : SDTypeProfile<1, 2, [
  SDTCisPtrTy<1>,
  SDTCisSameAs<0, 2>
]>;

def SDTtbuffer_load : SDTypeProfile<1, 8,
  [                    // vdata
   SDTCisVT<1, v4i32>, // rsrc
   SDTCisVT<2, i32>,   // vindex(VGPR)
   SDTCisVT<3, i32>,   // voffset(VGPR)
   SDTCisVT<4, i32>,   // soffset(SGPR)
   SDTCisVT<5, i32>,   // offset(imm)
   SDTCisVT<6, i32>,   // format(imm)
   SDTCisVT<7, i32>,   // cachepolicy, swizzled buffer(imm)
   SDTCisVT<8, i1>     // idxen(imm)
  ]>;

def SItbuffer_load : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT", SDTtbuffer_load,
                            [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;
def SItbuffer_load_d16 : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT_D16",
                                SDTtbuffer_load,
                                [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;

def SDTtbuffer_store : SDTypeProfile<0, 9,
  [                    // vdata
   SDTCisVT<1, v4i32>, // rsrc
   SDTCisVT<2, i32>,   // vindex(VGPR)
   SDTCisVT<3, i32>,   // voffset(VGPR)
   SDTCisVT<4, i32>,   // soffset(SGPR)
   SDTCisVT<5, i32>,   // offset(imm)
   SDTCisVT<6, i32>,   // format(imm)
   SDTCisVT<7, i32>,   // cachepolicy, swizzled buffer(imm)
   SDTCisVT<8, i1>     // idxen(imm)
  ]>;

def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT", SDTtbuffer_store,
                             [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
def SItbuffer_store_d16 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_D16",
                                 SDTtbuffer_store,
                                 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;

def SDTBufferLoad : SDTypeProfile<1, 7,
  [                    // vdata
   SDTCisVT<1, v4i32>, // rsrc
   SDTCisVT<2, i32>,   // vindex(VGPR)
   SDTCisVT<3, i32>,   // voffset(VGPR)
   SDTCisVT<4, i32>,   // soffset(SGPR)
   SDTCisVT<5, i32>,   // offset(imm)
   SDTCisVT<6, i32>,   // cachepolicy, swizzled buffer(imm)
   SDTCisVT<7, i1>]>;  // idxen(imm)

def SIbuffer_load : SDNode <"AMDGPUISD::BUFFER_LOAD", SDTBufferLoad,
                            [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_ubyte : SDNode <"AMDGPUISD::BUFFER_LOAD_UBYTE", SDTBufferLoad,
                                  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_ushort : SDNode <"AMDGPUISD::BUFFER_LOAD_USHORT", SDTBufferLoad,
                                   [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_byte : SDNode <"AMDGPUISD::BUFFER_LOAD_BYTE", SDTBufferLoad,
                                 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_short: SDNode <"AMDGPUISD::BUFFER_LOAD_SHORT", SDTBufferLoad,
                                 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_format : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT", SDTBufferLoad,
                                   [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_format_tfe : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT_TFE",
                                       SDTBufferLoad,
                                       [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_format_d16 : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT_D16",
                                       SDTBufferLoad,
                                       [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;

def SDTBufferStore : SDTypeProfile<0, 8,
  [                    // vdata
   SDTCisVT<1, v4i32>, // rsrc
   SDTCisVT<2, i32>,   // vindex(VGPR)
   SDTCisVT<3, i32>,   // voffset(VGPR)
   SDTCisVT<4, i32>,   // soffset(SGPR)
   SDTCisVT<5, i32>,   // offset(imm)
   SDTCisVT<6, i32>,   // cachepolicy, swizzled buffer(imm)
   SDTCisVT<7, i1>]>;  // idxen(imm)

def SIbuffer_store : SDNode <"AMDGPUISD::BUFFER_STORE", SDTBufferStore,
                             [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
def SIbuffer_store_byte: SDNode <"AMDGPUISD::BUFFER_STORE_BYTE",
                                 SDTBufferStore,
                                 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
def SIbuffer_store_short : SDNode <"AMDGPUISD::BUFFER_STORE_SHORT",
                                   SDTBufferStore,
                                   [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
def SIbuffer_store_format : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT",
                                    SDTBufferStore,
                                    [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
def SIbuffer_store_format_d16 : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT_D16",
                                        SDTBufferStore,
                                        [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;

// NOTE(review): the <string opcode> parameter list and the SDTypeProfile head
// of the "" def were stripped in transit; reconstructed from upstream LLVM.
multiclass SDBufferAtomic<string opcode> {
  def "" : SDNode <opcode,
    SDTypeProfile<1, 8,
      [SDTCisVT<2, v4i32>, // rsrc
       SDTCisVT<3, i32>,   // vindex(VGPR)
       SDTCisVT<4, i32>,   // voffset(VGPR)
       SDTCisVT<5, i32>,   // soffset(SGPR)
       SDTCisVT<6, i32>,   // offset(imm)
       SDTCisVT<7, i32>,   // cachepolicy(imm)
       SDTCisVT<8, i1>]>,  // idxen(imm)
    [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
  >;

  def "_noret" : PatFrag<
    (ops node:$vdata_in, node:$rsrc, node:$vindex, node:$voffset, node:$soffset,
         node:$offset, node:$cachepolicy, node:$idxen),
    (!cast<SDNode>(NAME) node:$vdata_in, node:$rsrc, node:$vindex,
      node:$voffset, node:$soffset, node:$offset, node:$cachepolicy,
      node:$idxen)> {
    let HasNoUse = true;
  }
}

defm SIbuffer_atomic_swap : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SWAP">;
defm SIbuffer_atomic_add : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_ADD">;
defm SIbuffer_atomic_sub : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SUB">;
defm SIbuffer_atomic_smin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMIN">;
defm SIbuffer_atomic_umin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMIN">;
defm SIbuffer_atomic_smax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMAX">;
defm SIbuffer_atomic_umax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMAX">;
defm SIbuffer_atomic_and : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_AND">;
defm SIbuffer_atomic_or : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_OR">;
defm SIbuffer_atomic_xor : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_XOR">;
defm SIbuffer_atomic_inc : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_INC">;
defm SIbuffer_atomic_dec : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_DEC">;
defm SIbuffer_atomic_csub : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_CSUB">;
defm SIbuffer_atomic_fadd : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_FADD">;
defm SIbuffer_atomic_fadd_bf16 : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_FADD_BF16">;
defm SIbuffer_atomic_fmin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_FMIN">;
defm SIbuffer_atomic_fmax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_FMAX">;
defm SIbuffer_atomic_cond_sub_u32 : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_COND_SUB_U32">;

def SIbuffer_atomic_cmpswap : SDNode <"AMDGPUISD::BUFFER_ATOMIC_CMPSWAP",
  SDTypeProfile<1, 9,
    [SDTCisVT<3, v4i32>, // rsrc
     SDTCisVT<4, i32>,   // vindex(VGPR)
     SDTCisVT<5, i32>,   // voffset(VGPR)
     SDTCisVT<6, i32>,   // soffset(SGPR)
     SDTCisVT<7, i32>,   // offset(imm)
     SDTCisVT<8, i32>,   // cachepolicy(imm)
     SDTCisVT<9, i1>]>,  // idxen(imm)
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
>;

def SIbuffer_atomic_cmpswap_noret : PatFrag<
  (ops node:$src, node:$cmp, node:$rsrc, node:$vindex, node:$voffset,
       node:$soffset, node:$offset, node:$cachepolicy, node:$idxen),
  (SIbuffer_atomic_cmpswap node:$src, node:$cmp, node:$rsrc, node:$vindex,
    node:$voffset, node:$soffset, node:$offset, node:$cachepolicy,
    node:$idxen)> {
  let HasNoUse = true;
}

// NOTE(review): the <string opcode, ValueType ty> parameter list and profile
// head were stripped in transit; reconstructed from upstream LLVM.
class SDGlobalAtomicNoRtn<string opcode, ValueType ty> : SDNode <opcode,
  SDTypeProfile<0, 2,
      [SDTCisPtrTy<0>,    // vaddr
       SDTCisVT<1, ty>]>, // vdata
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
>;

def SIpc_add_rel_offset : SDNode<"AMDGPUISD::PC_ADD_REL_OFFSET",
  SDTypeProfile<1, 2, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>, SDTCisSameAs<0,2>]>
>;

def SIlds : SDNode<"AMDGPUISD::LDS",
  SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>]>
>;

def SIload_d16_lo : SDNode<"AMDGPUISD::LOAD_D16_LO",
  SIload_d16,
  [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
>;

def SIload_d16_lo_u8 : SDNode<"AMDGPUISD::LOAD_D16_LO_U8",
  SIload_d16,
  [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
>;

def SIload_d16_lo_i8 : SDNode<"AMDGPUISD::LOAD_D16_LO_I8",
  SIload_d16,
  [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
>;

def SIload_d16_hi : SDNode<"AMDGPUISD::LOAD_D16_HI",
  SIload_d16,
  [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
>;

def SIload_d16_hi_u8 : SDNode<"AMDGPUISD::LOAD_D16_HI_U8",
  SIload_d16,
  [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
>;

def SIload_d16_hi_i8 : SDNode<"AMDGPUISD::LOAD_D16_HI_I8",
  SIload_d16,
  [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
>;

def SIdenorm_mode : SDNode<"AMDGPUISD::DENORM_MODE",
  SDTypeProfile<0 ,1, [SDTCisInt<0>]>,
  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]
>;

def SIfptrunc_round_upward : SDNode<"AMDGPUISD::FPTRUNC_ROUND_UPWARD",
  SDTFPRoundOp
>;

def SIfptrunc_round_downward : SDNode<"AMDGPUISD::FPTRUNC_ROUND_DOWNWARD",
  SDTFPRoundOp
>;

//===----------------------------------------------------------------------===//
// ValueType helpers
//===----------------------------------------------------------------------===//

// Returns 1 if SrcVT is an integer type other than i1.
class isIntType<ValueType SrcVT> {
  bit ret = !and(SrcVT.isInteger, !ne(SrcVT.Value, i1.Value));
}

//===----------------------------------------------------------------------===//
// PatFrags for global memory operations
//===----------------------------------------------------------------------===//

defm atomic_load_fmin : binary_atomic_op_all_as<SIatomic_fmin, 0>;
defm atomic_load_fmax : binary_atomic_op_all_as<SIatomic_fmax, 0>;

//===----------------------------------------------------------------------===//
// SDNodes PatFrags for loads/stores with a glue input.
// This is for SDNodes and PatFrag for local loads and stores to
// enable s_mov_b32 m0, -1 to be glued to the memory instructions.
//
// These mirror the regular load/store PatFrags and rely on special
// processing during Select() to add the glued copy.
//
//===----------------------------------------------------------------------===//

def AMDGPUld_glue : SDNode <"ISD::LOAD", SDTLoad,
  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
>;

def AMDGPUatomic_ld_glue : SDNode <"ISD::ATOMIC_LOAD", SDTAtomicLoad,
  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
>;

def unindexedload_glue : PatFrag <(ops node:$ptr), (AMDGPUld_glue node:$ptr)> {
  let IsLoad = 1;
  let IsUnindexed = 1;
}

def load_glue : PatFrag <(ops node:$ptr), (unindexedload_glue node:$ptr)> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
}

def atomic_load_8_glue : PatFrag<(ops node:$ptr),
  (AMDGPUatomic_ld_glue node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i8;
}

def atomic_load_16_glue : PatFrag<(ops node:$ptr),
  (AMDGPUatomic_ld_glue node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i16;
}

def atomic_load_32_glue : PatFrag<(ops node:$ptr),
  (AMDGPUatomic_ld_glue node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i32;
}

def atomic_load_64_glue : PatFrag<(ops node:$ptr),
  (AMDGPUatomic_ld_glue node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i64;
}

def extload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
  let IsLoad = 1;
  let IsAnyExtLoad = 1;
}

def sextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
  let IsLoad = 1;
  let IsSignExtLoad = 1;
}

def zextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
  let IsLoad = 1;
  let IsZeroExtLoad = 1;
}

def extloadi8_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def zextloadi8_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def extloadi16_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def zextloadi16_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def sextloadi8_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def sextloadi16_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

let IsLoad = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
def load_local_m0 : PatFrag<(ops node:$ptr), (load_glue node:$ptr)> {
  let IsNonExtLoad = 1;
}

def extloadi8_local_m0 : PatFrag<(ops node:$ptr), (extloadi8_glue node:$ptr)>;
def sextloadi8_local_m0 : PatFrag<(ops node:$ptr), (sextloadi8_glue node:$ptr)>;
def zextloadi8_local_m0 : PatFrag<(ops node:$ptr), (zextloadi8_glue node:$ptr)>;

def extloadi16_local_m0 : PatFrag<(ops node:$ptr), (extloadi16_glue node:$ptr)>;
def sextloadi16_local_m0 : PatFrag<(ops node:$ptr), (sextloadi16_glue node:$ptr)>;
def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>;
} // End IsLoad = 1, AddressSpaces = LoadAddress_local.AddrSpaces

def load_align8_local_m0 : PatFrag<(ops node:$ptr),
                                   (load_local_m0 node:$ptr)> {
  let IsLoad = 1;
  int MinAlignment = 8;
}

def load_align16_local_m0 : PatFrag<(ops node:$ptr),
                                    (load_local_m0 node:$ptr)> {
  let IsLoad = 1;
  int MinAlignment = 16;
}

let IsAtomic = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
def atomic_load_8_local_m0 : PatFrag<(ops node:$ptr),
                                     (atomic_load_8_glue node:$ptr)>;
def atomic_load_16_local_m0 : PatFrag<(ops node:$ptr),
                                      (atomic_load_16_glue node:$ptr)>;
def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr),
                                      (atomic_load_32_glue node:$ptr)>;
def atomic_load_64_local_m0 : PatFrag<(ops node:$ptr),
                                      (atomic_load_64_glue node:$ptr)>;
} // End let AddressSpaces = LoadAddress_local.AddrSpaces

def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore,
  [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
>;

def AMDGPUatomic_st_glue : SDNode <"ISD::ATOMIC_STORE", SDTAtomicStore,
  [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
>;

def unindexedstore_glue : PatFrag<(ops node:$val, node:$ptr),
                                  (AMDGPUst_glue node:$val, node:$ptr)> {
  let IsStore = 1;
  let IsUnindexed = 1;
}

def store_glue : PatFrag<(ops node:$val, node:$ptr),
                         (unindexedstore_glue node:$val, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 0;
}

def truncstore_glue : PatFrag<(ops node:$val, node:$ptr),
  (unindexedstore_glue node:$val, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 1;
}

def truncstorei8_glue : PatFrag<(ops node:$val, node:$ptr),
                                (truncstore_glue node:$val, node:$ptr)> {
  let IsStore = 1;
  let MemoryVT = i8;
  let IsTruncStore = 1;
}

def truncstorei16_glue : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstore_glue node:$val, node:$ptr)> {
  let IsStore = 1;
  let MemoryVT = i16;
  let IsTruncStore = 1;
}

let IsStore = 1, AddressSpaces = StoreAddress_local.AddrSpaces in {
def store_local_m0 : PatFrag<(ops node:$val, node:$ptr),
                             (store_glue node:$val, node:$ptr)>;
def truncstorei8_local_m0 : PatFrag<(ops node:$val, node:$ptr),
                                    (truncstorei8_glue node:$val, node:$ptr)>;
def truncstorei16_local_m0 : PatFrag<(ops node:$val, node:$ptr),
                                     (truncstorei16_glue node:$val, node:$ptr)>;
}

def store_align8_local_m0 : PatFrag <(ops node:$value, node:$ptr),
                                     (store_local_m0 node:$value, node:$ptr)>,
                            Aligned<8> {
  let IsStore = 1;
}

def store_align16_local_m0 : PatFrag <(ops node:$value, node:$ptr),
                                      (store_local_m0 node:$value, node:$ptr)>,
                             Aligned<16> {
  let IsStore = 1;
}

// NOTE(review): the cast<MemSDNode> template argument below was stripped in
// transit; reconstructed from upstream LLVM.
let PredicateCode = [{return cast<MemSDNode>(N)->getAlign() < 4;}],
    GISelPredicateCode = [{return (*MI.memoperands_begin())->getAlign() < 4;}],
    AddressSpaces = [ AddrSpaces.Local ] in {
def load_align_less_than_4_local : PatFrag<(ops node:$ptr),
                                           (load_local node:$ptr)> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
}

def load_align_less_than_4_local_m0 : PatFrag<(ops node:$ptr),
                                              (load_local_m0 node:$ptr)> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
}

def store_align_less_than_4_local : PatFrag <(ops node:$value, node:$ptr),
                                             (store_local node:$value, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 0;
}

def store_align_less_than_4_local_m0 : PatFrag <(ops node:$value, node:$ptr),
                                                (store_local_m0 node:$value, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 0;
}
}

def atomic_store_8_glue : PatFrag <
  (ops node:$ptr, node:$value),
  (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
  let IsAtomic = 1;
  let MemoryVT = i8;
}

def atomic_store_16_glue : PatFrag <
  (ops node:$ptr, node:$value),
  (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
  let IsAtomic = 1;
  let MemoryVT = i16;
}

def atomic_store_32_glue : PatFrag <
  (ops node:$ptr, node:$value),
  (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
  let IsAtomic = 1;
  let MemoryVT = i32;
}

def atomic_store_64_glue : PatFrag <
  (ops node:$ptr, node:$value),
  (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
  let IsAtomic = 1;
  let MemoryVT = i64;
}

let IsAtomic = 1, AddressSpaces = StoreAddress_local.AddrSpaces in {
def atomic_store_8_local_m0 : PatFrag<(ops node:$val, node:$ptr),
                                      (atomic_store_8_glue node:$val, node:$ptr)>;
def atomic_store_16_local_m0 : PatFrag<(ops node:$val, node:$ptr),
                                       (atomic_store_16_glue node:$val, node:$ptr)>;
def atomic_store_32_local_m0 : PatFrag<(ops node:$val, node:$ptr),
                                       (atomic_store_32_glue node:$val, node:$ptr)>;
def atomic_store_64_local_m0 : PatFrag<(ops node:$val, node:$ptr),
                                       (atomic_store_64_glue node:$val, node:$ptr)>;
} // End let IsAtomic = 1, AddressSpaces = StoreAddress_local.AddrSpaces

//===----------------------------------------------------------------------===//
// SDNodes PatFrags for a16 loads and stores with 3 components.
// v3f16/v3i16 is widened to v4f16/v4i16, so we need to match on the memory
// load/store size.
//===----------------------------------------------------------------------===//

// NOTE(review): the <SDPatternOperator name, ValueType vt> parameter lists of
// the four mubuf/mtbuf classes below were stripped in transit; reconstructed
// from upstream LLVM.
class mubuf_intrinsic_load<SDPatternOperator name, ValueType vt> : PatFrag <
  (ops node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset,
       node:$auxiliary, node:$idxen),
  (name node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset,
        node:$auxiliary, node:$idxen)> {
  let IsLoad = 1;
  let MemoryVT = vt;
}

class mubuf_intrinsic_store<SDPatternOperator name, ValueType vt> : PatFrag <
  (ops node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset,
       node:$offset, node:$auxiliary, node:$idxen),
  (name node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset,
        node:$offset, node:$auxiliary, node:$idxen)> {
  let IsStore = 1;
  let MemoryVT = vt;
}

class mtbuf_intrinsic_load<SDPatternOperator name, ValueType vt> : PatFrag <
  (ops node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset,
       node:$format, node:$auxiliary, node:$idxen),
  (name node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset,
        node:$format, node:$auxiliary, node:$idxen)> {
  let IsLoad = 1;
  let MemoryVT = vt;
}

class mtbuf_intrinsic_store<SDPatternOperator name, ValueType vt> : PatFrag <
  (ops node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset,
       node:$offset, node:$format, node:$auxiliary, node:$idxen),
  (name node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset,
        node:$offset, node:$format, node:$auxiliary, node:$idxen)> {
  let IsStore = 1;
  let MemoryVT = vt;
}

//===----------------------------------------------------------------------===//
// SDNodes PatFrags for d16 loads
//===----------------------------------------------------------------------===//

class LoadD16Frag<SDPatternOperator op> : PatFrag<
  (ops node:$ptr, node:$tied_in),
  (op node:$ptr, node:$tied_in)> {
  let IsLoad = 1;
}

foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in {
let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in {

def load_d16_hi_#as : LoadD16Frag <SIload_d16_hi>;

def az_extloadi8_d16_hi_#as : LoadD16Frag <SIload_d16_hi_u8> {
  let MemoryVT = i8;
}

def sextloadi8_d16_hi_#as : LoadD16Frag <SIload_d16_hi_i8> {
  let MemoryVT = i8;
}

def load_d16_lo_#as : LoadD16Frag <SIload_d16_lo>;

def az_extloadi8_d16_lo_#as : LoadD16Frag <SIload_d16_lo_u8> {
  let MemoryVT = i8;
}

def sextloadi8_d16_lo_#as : LoadD16Frag <SIload_d16_lo_i8> {
  let MemoryVT = i8;
}

} // End let AddressSpaces = ...
} // End foreach AddrSpace

def lshr_rev : PatFrag <
  (ops node:$src1, node:$src0),
  (srl $src0, $src1)
>;

def ashr_rev : PatFrag <
  (ops node:$src1, node:$src0),
  (sra $src0, $src1)
>;

def lshl_rev : PatFrag <
  (ops node:$src1, node:$src0),
  (shl $src0, $src1)
>;

def add_ctpop : PatFrag <
  (ops node:$src0, node:$src1),
  (add (ctpop $src0), $src1)
>;

def xnor : PatFrag <
  (ops node:$src0, node:$src1),
  (not (xor $src0, $src1))
>;

foreach I = 1-4 in {
def shl#I#_add : PatFrag <
  (ops node:$src0, node:$src1),
  (add (shl_oneuse $src0, (i32 I)), $src1)> {
  // FIXME: Poor substitute for disabling pattern in SelectionDAG
  let PredicateCode = [{return false;}];
  let GISelPredicateCode = [{return true;}];
}
}

// NOTE(review): the parameter list and !cast<SDNode> arguments below were
// stripped in transit; reconstructed from upstream LLVM.
multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0,
                            SDTypeProfile tc = SDTAtomic2,
                            bit IsInt = 1> {
  def _glue : SDNode <
    !if(is_amdgpu, "AMDGPUISD", "ISD")#"::ATOMIC_"#op_name, tc,
    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
  >;

  let AddressSpaces = StoreAddress_local.AddrSpaces in {
    defm _local_m0 : binary_atomic_op <!cast<SDNode>(NAME#"_glue"), IsInt>;
    defm _local_m0 : noret_binary_atomic_op <!cast<SDNode>(NAME#"_glue"),
                                             IsInt>;
  }

  let AddressSpaces = StoreAddress_region.AddrSpaces in {
    defm _region_m0 : binary_atomic_op <!cast<SDNode>(NAME#"_glue"), IsInt>;
    defm _region_m0 : noret_binary_atomic_op <!cast<SDNode>(NAME#"_glue"),
                                              IsInt>;
  }
}

defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
defm atomic_load_uinc_wrap : SIAtomicM0Glue2 <"LOAD_UINC_WRAP">;
defm atomic_load_udec_wrap : SIAtomicM0Glue2 <"LOAD_UDEC_WRAP">;
defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
defm atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">;
defm atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
defm atomic_swap : SIAtomicM0Glue2 <"SWAP">;
defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 0, SDTAtomic2_f32, 0>;
defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32, 0>;
defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32, 0>;

def as_i1timm : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
}]>;

def as_i8imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
}]>;

def as_i8timm : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
}]>;

def as_i16imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
}]>;

def as_i16timm : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
}]>;

def as_i32imm: SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
}]>;

def as_i32timm: SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
}]>;

def as_i64imm: SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
}]>;

def cond_as_i32imm: SDNodeXForm<cond, [{
  return CurDAG->getTargetConstant(N->get(), SDLoc(N), MVT::i32);
}]>;

// Copied from the AArch64 backend:
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def frameindex_to_targetframeindex : SDNodeXForm<frameindex, [{
  auto FI = cast<FrameIndexSDNode>(N);
  return CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
}]>;

// Copied from the AArch64 backend:
def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;

class bitextract_imm<int bitnum> : SDNodeXForm<imm, [{
  uint64_t Imm = N->getZExtValue();
  unsigned Bit = (Imm >> }] # bitnum # [{ ) & 1;
  return CurDAG->getTargetConstant(Bit, SDLoc(N), MVT::i1);
}]>;

def SIMM16bit : TImmLeaf <i32,
  [{return isInt<16>(Imm) || isUInt<16>(Imm);}], as_i16timm
>;

def i64imm_32bit : ImmLeaf<i64, [{
  return isUInt<32>(Imm);
}]>;

def InlineImm16 : ImmLeaf<i16, [{
  return isInlineImmediate16(Imm);
}]>;

def InlineImm32 : ImmLeaf<i32, [{
  return isInlineImmediate32(Imm);
}]>;

def InlineImm64 : ImmLeaf<i64, [{
  return isInlineImmediate64(Imm);
}]>;

def InlineImmFP32 : FPImmLeaf<f32, [{
  return isInlineImmediate(Imm);
}]>;

def InlineImmFP64 : FPImmLeaf<f64, [{
  return isInlineImmediate(Imm);
}]>;

class VGPRImm <dag frag> : PatLeaf<frag, [{
  return isVGPRImm(N);
}]>;
def NegateImm : SDNodeXForm<imm, [{
  return CurDAG->getConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
}]>;

// TODO: When FP inline imm values work?
def NegSubInlineConst32 : ImmLeaf<i32, [{
  return Imm < -16 && Imm >= -64;
}], NegateImm>;

def NegSubInlineIntConst16 : ImmLeaf<i16, [{
  return Imm < -16 && Imm >= -64;
}], NegateImm>;

def ShiftAmt32Imm : ImmLeaf <i32, [{
  return Imm < 32;
}]>;

def fp16_zeros_high_16bits : PatLeaf<(f16 VGPR_32:$src), [{
  return fp16SrcZerosHighBits(N->getOpcode());
}]>;

//===----------------------------------------------------------------------===//
// MUBUF/SMEM Patterns
//===----------------------------------------------------------------------===//

def extract_cpol : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(
      N->getZExtValue() & (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12
                               ? AMDGPU::CPol::ALL
                               : AMDGPU::CPol::ALL_pregfx12),
      SDLoc(N), MVT::i8);
}]>;

def extract_swz : SDNodeXForm<timm, [{
  const bool Swizzle =
      N->getZExtValue() & (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12
                               ? AMDGPU::CPol::SWZ
                               : AMDGPU::CPol::SWZ_pregfx12);
  return CurDAG->getTargetConstant(Swizzle, SDLoc(N), MVT::i8);
}]>;

def extract_cpol_set_glc : SDNodeXForm<timm, [{
  const uint32_t cpol =
      N->getZExtValue() & (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12
                               ? AMDGPU::CPol::ALL
                               : AMDGPU::CPol::ALL_pregfx12);
  return CurDAG->getTargetConstant(cpol | AMDGPU::CPol::GLC, SDLoc(N), MVT::i8);
}]>;

//===----------------------------------------------------------------------===//
// Custom Operands
//===----------------------------------------------------------------------===//

def SOPPBrTarget : CustomOperand<OtherVT> {
  let PrintMethod = "printOperand";
  let EncoderMethod = "getSOPPBrEncoding";
  let DecoderMethod = "decodeSOPPBrTarget";
  let OperandType = "OPERAND_PCREL";
}

def si_ga : Operand<iPTR>;

// NOTE(review): CustomOperand template arguments were stripped in transit;
// reconstructed — confirm against upstream.
def InterpSlot : CustomOperand<i32>;

// It appears to be necessary to create a separate operand for this to
// be able to parse attr with no space.
// NOTE(review): throughout this section the template-argument lists of
// CustomOperand/RegisterOperand/DefaultOperand/NamedIntOperand uses were
// stripped in transit; reconstructed from upstream LLVM — confirm each
// argument list (types, optionality, ImmTy names) against the upstream file.
def InterpAttr : CustomOperand<i32>;

def InterpAttrChan : ImmOperand<i32>;

def SplitBarrier : ImmOperand<i32> {
  let OperandNamespace = "AMDGPU";
  let OperandType = "OPERAND_INLINE_SPLIT_BARRIER_INT32";
  let DecoderMethod = "decodeSplitBarrier";
  let PrintMethod = "printOperand";
}

def VReg32OrOffClass : AsmOperandClass {
  let Name = "VReg32OrOff";
  let ParserMethod = "parseVReg32OrOff";
}

def SendMsg : CustomOperand<i32>;

def Swizzle : CustomOperand<i16, 1>;

def Endpgm : CustomOperand<i16, 1>;

def SWaitCnt : CustomOperand<i32>;

def DepCtr : CustomOperand<i32>;

def SDelayALU : CustomOperand<i32>;

include "SIInstrFormats.td"
include "VIInstrFormats.td"

def BoolReg : AsmOperandClass {
  let Name = "BoolReg";
  let ParserMethod = "parseBoolReg";
  let RenderMethod = "addRegOperands";
}

class BoolRC : RegisterOperand<SReg_1> {
  let ParserMatchClass = BoolReg;
  let DecoderMethod = "decodeBoolReg";
}

def SSrc_i1 : RegisterOperand<SReg_1_XEXEC> {
  let ParserMatchClass = BoolReg;
  let DecoderMethod = "decodeBoolReg";
}

def VOPDstS64orS32 : BoolRC {
  let PrintMethod = "printVOPDst";
}

// SCSrc_i1 is the operand for pseudo instructions only.
// Boolean immediates shall not be exposed to codegen instructions.
def SCSrc_i1 : RegisterOperand<SReg_1_XEXEC> {
  let OperandNamespace = "AMDGPU";
  let OperandType = "OPERAND_REG_IMM_INT32";
  let ParserMatchClass = BoolReg;
  let DecoderMethod = "decodeBoolReg";
}

// ===----------------------------------------------------------------------===//
// ExpSrc* Special cases for exp src operands which are printed as
// "off" depending on en operand.
// ===----------------------------------------------------------------------===//

def ExpSrc0 : RegisterOperand<VGPR_32> {
  let PrintMethod = "printExpSrc0";
  let ParserMatchClass = VReg32OrOffClass;
}

def ExpSrc1 : RegisterOperand<VGPR_32> {
  let PrintMethod = "printExpSrc1";
  let ParserMatchClass = VReg32OrOffClass;
}

def ExpSrc2 : RegisterOperand<VGPR_32> {
  let PrintMethod = "printExpSrc2";
  let ParserMatchClass = VReg32OrOffClass;
}

def ExpSrc3 : RegisterOperand<VGPR_32> {
  let PrintMethod = "printExpSrc3";
  let ParserMatchClass = VReg32OrOffClass;
}

class SDWASrc<ValueType vt> : RegisterOperand<VS_32> {
  let OperandNamespace = "AMDGPU";
  string Type = !if(vt.isFP, "FP", "INT");
  let OperandType = "OPERAND_REG_INLINE_C_"#Type#vt.Size;
  let DecoderMethod = "decodeSDWASrc"#vt.Size;
  let EncoderMethod = "getSDWASrcEncoding";
}

def SDWASrc_i32 : SDWASrc<i32>;
def SDWASrc_i16 : SDWASrc<i16>;
def SDWASrc_f32 : SDWASrc<f32>;
def SDWASrc_f16 : SDWASrc<f16>;

def SDWAVopcDst : BoolRC {
  let OperandNamespace = "AMDGPU";
  let OperandType = "OPERAND_SDWA_VOPC_DST";
  let EncoderMethod = "getSDWAVopcDstEncoding";
  let DecoderMethod = "decodeSDWAVopcDst";
  let PrintMethod = "printVOPDst";
}

class NamedIntOperand<ValueType Type, string Prefix, string Name = NAME,
                      string ConvertMethod = "nullptr">
    : CustomOperand<Type, 1, Name> {
  let ParserMethod =
    "[this](OperandVector &Operands) -> ParseStatus { "#
    "return parseIntWithPrefix(\""#Prefix#"\", Operands, "#
    "AMDGPUOperand::"#ImmTy#", "#ConvertMethod#"); }";
}

class NamedBitOperand<string Id, string Name = NAME>
    : CustomOperand<i1, 1, Name> {
  let PredicateMethod = "isImmTy<AMDGPUOperand::"#ImmTy#">";
  let ParserMethod =
    "[this](OperandVector &Operands) -> ParseStatus { "#
    "return parseNamedBit(\""#Id#"\", Operands, AMDGPUOperand::"#ImmTy#"); }";
  let PrintMethod = "[this](const MCInst *MI, unsigned OpNo, "#
    "const MCSubtargetInfo &STI, raw_ostream &O) { "#
    "printNamedBit(MI, OpNo, O, \""#Id#"\"); }";
}

class DefaultOperand<CustomOperand Op, int Value>
  : OperandWithDefaultOps<Op.Type, (ops (Op.Type Value))>,
    CustomOperandProps<1> {
  let ParserMatchClass = Op.ParserMatchClass;
  let PrintMethod = Op.PrintMethod;
}

class SDWAOperand<string Id, string Name = NAME>
    : CustomOperand<i32, 1, Name> {
  let ParserMethod =
    "[this](OperandVector &Operands) -> ParseStatus { "#
    "return parseSDWASel(Operands, \""#Id#"\", AMDGPUOperand::"#ImmTy#"); }";
}

class ArrayOperand0<string Id, string Name = NAME>
  : OperandWithDefaultOps<i32, (ops (i32 0))>,
    CustomOperandProps<1, Name> {
  let ParserMethod =
    "[this](OperandVector &Operands) -> ParseStatus { "#
    "return parseOperandArrayWithPrefix(\""#Id#"\", Operands, "#
    "AMDGPUOperand::"#ImmTy#"); }";
}

let ImmTy = "ImmTyOffset" in
def flat_offset : CustomOperand<i32, 1, "FlatOffset">;
def offset : NamedIntOperand<i32, "offset">;
def offset0 : NamedIntOperand<i8, "offset0">;
def offset1 : NamedIntOperand<i8, "offset1">;

def gds : NamedBitOperand<"gds", "GDS">;

def omod : CustomOperand<i32, 1, "OModSI">;
def omod0 : DefaultOperand<omod, 0>;

// We need to make the cases with a default of 0 distinct from no
// default to help deal with some cases where the operand appears
// before a mandatory operand.
def clampmod : NamedBitOperand<"clamp", "ClampSI">;
def clampmod0 : DefaultOperand<clampmod, 0>;
def highmod : NamedBitOperand<"high", "High">;

def CPol : CustomOperand<i32, 1>;
def CPol_0 : DefaultOperand<CPol, 0>;
def CPol_GLC1 : DefaultOperand<CPol, 1>;
def CPol_GLC : ValuePredicatedOperand<CPol, "Op.getImm() & CPol::GLC">;
def CPol_NonGLC : ValuePredicatedOperand<CPol, "!(Op.getImm() & CPol::GLC)", 1>;
// NOTE(review): default values for the two WithDefault operands were lost;
// confirm against upstream.
def CPol_GLC_WithDefault : DefaultOperand<CPol_GLC, !shl(1, CPolBit.GLC)>;
def CPol_NonGLC_WithDefault : DefaultOperand<CPol_NonGLC, 0>;

def TFE : NamedBitOperand<"tfe">;
def UNorm : NamedBitOperand<"unorm">;
def DA : NamedBitOperand<"da">;
def R128A16 : CustomOperand<i1, 1>;
def A16 : NamedBitOperand<"a16">;
def D16 : NamedBitOperand<"d16">;
def LWE : NamedBitOperand<"lwe">;
def exp_compr : NamedBitOperand<"compr", "ExpCompr">;
def exp_vm : NamedBitOperand<"vm", "ExpVM">;

def FORMAT : CustomOperand<i8>;

def DMask : NamedIntOperand<i16, "dmask">;
def Dim : CustomOperand<i8>;

def dst_sel : SDWAOperand<"dst_sel", "SDWADstSel">;
def src0_sel : SDWAOperand<"src0_sel", "SDWASrc0Sel">;
def src1_sel : SDWAOperand<"src1_sel", "SDWASrc1Sel">;
def dst_unused : CustomOperand<i32, 0, "SDWADstUnused">;

def op_sel0 : ArrayOperand0<"op_sel", "OpSel">;
def op_sel_hi0 : ArrayOperand0<"op_sel_hi", "OpSelHi">;
def neg_lo0 : ArrayOperand0<"neg_lo", "NegLo">;
def neg_hi0 : ArrayOperand0<"neg_hi", "NegHi">;

def IndexKey16bit : CustomOperand<i32, 1>;
def IndexKey8bit : CustomOperand<i32, 1>;

def dpp8 : CustomOperand<i32, 0, "DPP8">;
def
dpp_ctrl : CustomOperand; let DefaultValue = "0xf" in { def row_mask : NamedIntOperand; def bank_mask : NamedIntOperand; } def bound_ctrl : NamedIntOperand bool { return convertDppBoundCtrl(BC); }">; def FI : NamedIntOperand; def blgp : CustomOperand; def cbsz : NamedIntOperand; def abid : NamedIntOperand; def hwreg : CustomOperand; def exp_tgt : CustomOperand; def wait_vdst : NamedIntOperand; def wait_exp : NamedIntOperand; def wait_va_vdst : NamedIntOperand; def wait_va_vsrc : NamedIntOperand; class KImmFPOperand : ImmOperand { let OperandNamespace = "AMDGPU"; let OperandType = "OPERAND_KIMM"#vt.Size; let PrintMethod = "printU"#vt.Size#"ImmOperand"; let DecoderMethod = "decodeOperand_KImmFP"; } // 32-bit VALU immediate operand that uses the constant bus. def KImmFP32 : KImmFPOperand; // 32-bit VALU immediate operand with a 16-bit value that uses the // constant bus. def KImmFP16 : KImmFPOperand; class FPInputModsMatchClass : AsmOperandClass { let Name = "RegOrImmWithFP"#opSize#"InputMods"; let ParserMethod = "parseRegOrImmWithFPInputMods"; let PredicateMethod = "isRegOrImmWithFP"#opSize#"InputMods"; } class FPVCSrcInputModsMatchClass : FPInputModsMatchClass { let Name = "RegOrInlineImmWithFP"#opSize#"InputMods"; let PredicateMethod = "isRegOrInlineImmWithFP"#opSize#"InputMods"; } def FP16InputModsMatchClass : FPInputModsMatchClass<16>; def FPT16InputModsMatchClass : FPInputModsMatchClass<16> { let Name = "RegOrImmWithFPT16InputMods"; let PredicateMethod = "isRegOrImmWithFPT16InputMods"; } def FP32InputModsMatchClass : FPInputModsMatchClass<32>; def FP64InputModsMatchClass : FPInputModsMatchClass<64>; def FP16VCSrcInputModsMatchClass : FPVCSrcInputModsMatchClass<16>; def FP32VCSrcInputModsMatchClass : FPVCSrcInputModsMatchClass<32>; class InputMods : Operand { let OperandNamespace = "AMDGPU"; let OperandType = "OPERAND_INPUT_MODS"; let ParserMatchClass = matchClass; } class FPInputMods : InputMods { let PrintMethod = "printOperandAndFPInputMods"; } def 
FP16InputMods : FPInputMods; def FPT16InputMods : FPInputMods; def FP32InputMods : FPInputMods; def FP64InputMods : FPInputMods; def FP16VCSrcInputMods : FPInputMods; def FP32VCSrcInputMods : FPInputMods; class IntInputModsMatchClass : AsmOperandClass { let Name = "RegOrImmWithInt"#opSize#"InputMods"; let ParserMethod = "parseRegOrImmWithIntInputMods"; let PredicateMethod = "isRegOrImmWithInt"#opSize#"InputMods"; } class IntVCSrcInputModsMatchClass : IntInputModsMatchClass { let Name = "RegOrInlineImmWithInt"#opSize#"InputMods"; let PredicateMethod = "isRegOrInlineImmWithInt"#opSize#"InputMods"; } def IntT16InputModsMatchClass : IntInputModsMatchClass<16> { let Name = "RegOrImmWithIntT16InputMods"; let PredicateMethod = "isRegOrImmWithIntT16InputMods"; } def Int32InputModsMatchClass : IntInputModsMatchClass<32>; def Int64InputModsMatchClass : IntInputModsMatchClass<64>; def Int32VCSrcInputModsMatchClass : IntVCSrcInputModsMatchClass<32>; class IntInputMods : InputMods { let PrintMethod = "printOperandAndIntInputMods"; } def IntT16InputMods : IntInputMods; def Int32InputMods : IntInputMods; def Int64InputMods : IntInputMods; def Int32VCSrcInputMods : IntInputMods; class OpSelModsMatchClass : AsmOperandClass { let Name = "OpSelMods"; let ParserMethod = "parseRegOrImm"; let PredicateMethod = "isRegOrImm"; } def IntOpSelModsMatchClass : OpSelModsMatchClass; def IntOpSelMods : InputMods; class FPSDWAInputModsMatchClass : AsmOperandClass { let Name = "SDWAWithFP"#opSize#"InputMods"; let ParserMethod = "parseRegOrImmWithFPInputMods"; let PredicateMethod = "isSDWAFP"#opSize#"Operand"; } def FP16SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<16>; def FP32SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<32>; class FPSDWAInputMods : InputMods { let PrintMethod = "printOperandAndFPInputMods"; } def FP16SDWAInputMods : FPSDWAInputMods; def FP32SDWAInputMods : FPSDWAInputMods; def FPVRegInputModsMatchClass : AsmOperandClass { let Name = "VRegWithFPInputMods"; let 
ParserMethod = "parseRegWithFPInputMods"; let PredicateMethod = "isVRegWithInputMods"; } class FPT16VRegInputModsMatchClass : AsmOperandClass { let Name = !if(IsFake16, "Fake16VRegWithFPInputMods", "T16VRegWithFPInputMods"); let ParserMethod = "parseRegWithFPInputMods"; let PredicateMethod = "isT16VRegWithInputMods<" # !if(IsFake16, "true", "false") # ">"; } def FPVRegInputMods : InputMods { let PrintMethod = "printOperandAndFPInputMods"; } class FPT16VRegInputMods : InputMods > { let PrintMethod = "printOperandAndFPInputMods"; } class IntSDWAInputModsMatchClass : AsmOperandClass { let Name = "SDWAWithInt"#opSize#"InputMods"; let ParserMethod = "parseRegOrImmWithIntInputMods"; let PredicateMethod = "isSDWAInt"#opSize#"Operand"; } def Int16SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<16>; def Int32SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<32>; def Bin32SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<32> { let Name = "SDWAWithBin32InputMods"; let ParserMethod = "parseRegOrImm"; } class IntSDWAInputMods : InputMods { let PrintMethod = "printOperandAndIntInputMods"; } def Int16SDWAInputMods : IntSDWAInputMods; def Int32SDWAInputMods : IntSDWAInputMods; def Bin32SDWAInputMods : IntSDWAInputMods; def IntVRegInputModsMatchClass : AsmOperandClass { let Name = "VRegWithIntInputMods"; let ParserMethod = "parseRegWithIntInputMods"; let PredicateMethod = "isVRegWithInputMods"; } class IntT16VRegInputModsMatchClass : AsmOperandClass { let Name = !if(IsFake16, "Fake16VRegWithIntInputMods", "T16VRegWithIntInputMods"); let ParserMethod = "parseRegWithIntInputMods"; let PredicateMethod = "isT16VRegWithInputMods<" # !if(IsFake16, "true", "false") # ">"; } class IntT16VRegInputMods : InputMods > { let PrintMethod = "printOperandAndIntInputMods"; } def IntVRegInputMods : InputMods { let PrintMethod = "printOperandAndIntInputMods"; } class PackedFPInputModsMatchClass : AsmOperandClass { let Name = "PackedFP"#opSize#"InputMods"; let ParserMethod = 
"parseRegOrImm"; let PredicateMethod = "isRegOrImm"; // let PredicateMethod = "isPackedFP"#opSize#"InputMods"; } class PackedIntInputModsMatchClass : AsmOperandClass { let Name = "PackedInt"#opSize#"InputMods"; let ParserMethod = "parseRegOrImm"; let PredicateMethod = "isRegOrImm"; // let PredicateMethod = "isPackedInt"#opSize#"InputMods"; } def PackedF16InputModsMatchClass : PackedFPInputModsMatchClass<16>; def PackedI16InputModsMatchClass : PackedIntInputModsMatchClass<16>; class PackedFPInputMods : InputMods { // let PrintMethod = "printPackedFPInputMods"; } class PackedIntInputMods : InputMods { //let PrintMethod = "printPackedIntInputMods"; } def PackedF16InputMods : PackedFPInputMods; def PackedI16InputMods : PackedIntInputMods; //===----------------------------------------------------------------------===// // Complex patterns //===----------------------------------------------------------------------===// def DS1Addr1Offset : ComplexPattern; def DS64Bit4ByteAligned : ComplexPattern; def DS128Bit8ByteAligned : ComplexPattern; def MOVRELOffset : ComplexPattern; def VOP3Mods0 : ComplexPattern; // Modifiers for floating point instructions. def VOP3Mods : ComplexPattern; // VOP3 modifiers used for instructions that do not read canonicalized // floating point values (i.e. 
integer operations with FP source // modifiers) def VOP3ModsNonCanonicalizing : ComplexPattern; def VOP3NoMods : ComplexPattern; def VOP3OMods : ComplexPattern; def VOP3PMods : ComplexPattern; def VOP3PModsDOT : ComplexPattern; def VOP3PModsNeg : ComplexPattern; def WMMAOpSelVOP3PMods : ComplexPattern; def WMMAModsF32NegAbs : ComplexPattern; def WMMAModsF16Neg : ComplexPattern; def WMMAModsF16NegAbs : ComplexPattern; def WMMAVISrc : ComplexPattern; def SWMMACIndex8 : ComplexPattern; def SWMMACIndex16 : ComplexPattern; def VOP3OpSel : ComplexPattern; def VOP3OpSelMods : ComplexPattern; def VOP3PMadMixModsExt : ComplexPattern; def VOP3PMadMixMods : ComplexPattern; def VINTERPMods : ComplexPattern; def VINTERPModsHi : ComplexPattern; //===----------------------------------------------------------------------===// // SI assembler operands //===----------------------------------------------------------------------===// def SIOperand { int ZERO = 0x80; int VCC = 0x6A; int FLAT_SCR = 0x68; } // This should be kept in sync with SISrcMods enum def SRCMODS { int NONE = 0; int NEG = 1; int ABS = 2; int NEG_ABS = 3; int NEG_HI = ABS; int OP_SEL_0 = 4; int OP_SEL_1 = 8; int DST_OP_SEL = 8; } def DSTCLAMP { int NONE = 0; int ENABLE = 1; } def DSTOMOD { int NONE = 0; } def HWREG { int MODE = 1; int STATUS = 2; int TRAPSTS = 3; int HW_ID = 4; int GPR_ALLOC = 5; int LDS_ALLOC = 6; int IB_STS = 7; int MEM_BASES = 15; int TBA_LO = 16; int TBA_HI = 17; int TMA_LO = 18; int TMA_HI = 19; int FLAT_SCR_LO = 20; int FLAT_SCR_HI = 21; int XNACK_MASK = 22; int POPS_PACKER = 25; int SHADER_CYCLES = 29; } class getHwRegImm { int ret = !and(!or(Reg, !shl(Offset, 6), !shl(!add(Size, -1), 11)), 65535); } //===----------------------------------------------------------------------===// // // SI Instruction multiclass helpers. // // Instructions with _32 take 32-bit operands. // Instructions with _64 take 64-bit operands. // // VOP_* instructions can use either a 32-bit or 64-bit encoding. 
The 32-bit // encoding is the standard encoding, but instruction that make use of // any of the instruction modifiers must use the 64-bit encoding. // // Instructions with _e32 use the 32-bit encoding. // Instructions with _e64 use the 64-bit encoding. // //===----------------------------------------------------------------------===// class SIMCInstr { string PseudoInstr = pseudo; int Subtarget = subtarget; } //===----------------------------------------------------------------------===// // Vector ALU classes //===----------------------------------------------------------------------===// class getNumSrcArgs { int ret = !if (!eq(Src0.Value, untyped.Value), 0, !if (!eq(Src1.Value, untyped.Value), 1, // VOP1 !if (!eq(Src2.Value, untyped.Value), 2, // VOP2 3))); // VOP3 } // Returns the register class to use for the destination of VOP[123C] // instructions for the given VT. class getVALUDstForVT { defvar op16 = !if(IsTrue16, !if (IsVOP3Encoding, VOPDstOperand_t16, VOPDstOperand_t16Lo128), VOPDstOperand); RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand, !if(!eq(VT.Size, 128), VOPDstOperand, !if(!eq(VT.Size, 64), VOPDstOperand, !if(!eq(VT.Size, 16), op16, VOPDstS64orS32)))); // else VT == i1 } class getVALUDstForVT_fake16 { RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand, !if(!eq(VT.Size, 128), VOPDstOperand, !if(!eq(VT.Size, 64), VOPDstOperand, !if(!eq(VT.Size, 16), VOPDstOperand, VOPDstS64orS32)))); // else VT == i1 } // Returns the register class to use for the destination of VOP[12C] // instructions with SDWA extension class getSDWADstForVT { RegisterOperand ret = !if(!eq(VT.Size, 1), SDWAVopcDst, // VOPC VOPDstOperand); // VOP1/2 32-bit dst } // Returns the register class to use for source 0 of VOP[12C] // instructions for the given VT. 
class getVOPSrc0ForVT { RegisterOperand ret = !if(VT.isFP, !if(!eq(VT.Size, 64), VSrc_f64, !if(!or(!eq(VT.Value, f16.Value), !eq(VT.Value, bf16.Value)), !if(IsTrue16, !if(IsFake16, VSrcFake16_f16_Lo128, VSrcT_f16_Lo128), VSrc_f16 ), !if(!or(!eq(VT.Value, v2f16.Value), !eq(VT.Value, v2bf16.Value)), VSrc_v2f16, !if(!or(!eq(VT.Value, v4f16.Value), !eq(VT.Value, v4bf16.Value)), AVSrc_64, VSrc_f32 ) ) ) ), !if(!eq(VT.Size, 64), VSrc_b64, !if(!eq(VT.Value, i16.Value), !if(IsTrue16, !if(IsFake16, VSrcFake16_b16_Lo128, VSrcT_b16_Lo128), VSrc_b16 ), !if(!eq(VT.Value, v2i16.Value), VSrc_v2b16, VSrc_b32 ) ) ) ); } class getSOPSrcForVT { RegisterOperand ret = !if(!eq(VT.Size, 64), SSrc_b64, SSrc_b32); } // Returns the vreg register class to use for source operand given VT class getVregSrcForVT { RegisterOperand ret = !if (!eq(VT.Size, 128), RegisterOperand, !if (!eq(VT.Size, 96), RegisterOperand, !if (!eq(VT.Size, 64), RegisterOperand, !if (!eq(VT.Size, 48), RegisterOperand, !if (!eq(VT.Size, 16), !if (IsTrue16, !if (IsFake16, VGPRSrc_32_Lo128, VGPRSrc_16_Lo128), RegisterOperand), RegisterOperand))))); } class getSDWASrcForVT { RegisterOperand retFlt = !if(!eq(VT.Size, 16), SDWASrc_f16, SDWASrc_f32); RegisterOperand retInt = !if(!eq(VT.Size, 16), SDWASrc_i16, SDWASrc_i32); RegisterOperand ret = !if(VT.isFP, retFlt, retInt); } // Returns the register class to use for sources of VOP3 instructions for the // given VT. 
class getVOP3SrcForVT { RegisterOperand ret = !if(!eq(VT.Size, 128), VRegSrc_128, !if(!eq(VT.Size, 64), !if(VT.isFP, !if(!eq(VT.Value, v2f32.Value), VSrc_v2f32, VSrc_f64), !if(!eq(VT.Value, v2i32.Value), VSrc_v2b32, VSrc_b64)), !if(!eq(VT.Value, i1.Value), SSrc_i1, !if(VT.isFP, !if(!or(!eq(VT.Value, f16.Value), !eq(VT.Value, bf16.Value)), !if(IsTrue16, VSrcT_f16, VSrc_f16), !if(!or(!eq(VT.Value, v2f16.Value), !eq(VT.Value, v2bf16.Value)), VSrc_v2f16, !if(!or(!eq(VT.Value, v4f16.Value), !eq(VT.Value, v4bf16.Value)), AVSrc_64, VSrc_f32 ) ) ), !if(!eq(VT.Value, i16.Value), !if(IsTrue16, VSrcT_b16, VSrc_b16), !if(!eq(VT.Value, v2i16.Value), VSrc_v2b16, VSrc_b32 ) ) ) ) ) ); } // Src2 of VOP3 DPP instructions cannot be a literal class getVOP3DPPSrcForVT { RegisterOperand ret = !if (!eq(VT.Value, i1.Value), SSrc_i1, !if (VT.isFP, !if (!or(!eq(VT.Value, f16.Value), !eq(VT.Value, bf16.Value)), VCSrc_f16, !if (!or(!eq(VT.Value, v2f16.Value), !eq(VT.Value, v2bf16.Value)), VCSrc_v2f16, VCSrc_f32)), !if (!eq(VT.Value, i16.Value), VCSrc_b16, !if (!eq(VT.Value, v2i16.Value), VCSrc_v2b16, VCSrc_b32)))); } // Float or packed int class isModifierType { bit ret = !or(!eq(SrcVT.Value, f16.Value), !eq(SrcVT.Value, bf16.Value), !eq(SrcVT.Value, f32.Value), !eq(SrcVT.Value, f64.Value), !eq(SrcVT.Value, v2f16.Value), !eq(SrcVT.Value, v2i16.Value), !eq(SrcVT.Value, v2bf16.Value), !eq(SrcVT.Value, v2f32.Value), !eq(SrcVT.Value, v2i32.Value), !eq(SrcVT.Value, v4f16.Value), !eq(SrcVT.Value, v4i16.Value), !eq(SrcVT.Value, v4bf16.Value), !eq(SrcVT.Value, v4f32.Value), !eq(SrcVT.Value, v4i32.Value), !eq(SrcVT.Value, v8f16.Value), !eq(SrcVT.Value, v8i16.Value), !eq(SrcVT.Value, v8bf16.Value), !eq(SrcVT.Value, v8f32.Value), !eq(SrcVT.Value, v8i32.Value), !eq(SrcVT.Value, v16f16.Value), !eq(SrcVT.Value, v16i16.Value), !eq(SrcVT.Value, v16bf16.Value)); } // Return type of input modifiers operand for specified input operand class getSrcMod { Operand ret = !if(!eq(VT.Size, 64), !if(VT.isFP, 
FP64InputMods, Int64InputMods), !if(!eq(VT.Size, 16), !if(VT.isFP, !if(IsTrue16, FPT16InputMods, FP16InputMods), !if(IsTrue16, IntT16InputMods, IntOpSelMods)), !if(VT.isFP, FP32InputMods, Int32InputMods))); } class getOpSelMod { Operand ret = !if(!or(!eq(VT.Value, f16.Value), !eq(VT.Value, bf16.Value)), FP16InputMods, IntOpSelMods); } // Return type of input modifiers operand specified input operand for DPP class getSrcModDPP { Operand ret = !if(VT.isFP, FPVRegInputMods, IntVRegInputMods); } class getSrcModDPP_t16 { Operand ret = !if (VT.isFP, !if (!or(!eq(VT.Value, f16.Value), !eq(VT.Value, bf16.Value)), FPT16VRegInputMods, FPVRegInputMods), !if (!eq(VT.Value, i16.Value), IntT16VRegInputMods, IntVRegInputMods)); } // Return type of input modifiers operand for specified input operand for DPP class getSrcModVOP3DPP { Operand ret = !if (VT.isFP, !if (!or(!eq(VT.Value, f16.Value), !eq(VT.Value, bf16.Value)), FP16VCSrcInputMods, FP32VCSrcInputMods), Int32VCSrcInputMods); } // Return type of input modifiers operand specified input operand for SDWA class getSrcModSDWA { Operand ret = !if(!eq(VT.Value, f16.Value), FP16SDWAInputMods, !if(!eq(VT.Value, f32.Value), FP32SDWAInputMods, !if(!eq(VT.Value, i16.Value), Int16SDWAInputMods, !if(!eq(VT.Value, bf16.Value), FP16SDWAInputMods, Int32SDWAInputMods)))); } // Returns the input arguments for VOP[12C] instructions for the given SrcVT. class getIns32 { dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0), // VOP1 !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2 (ins))); } // Returns the input arguments for VOP3 instructions for the given SrcVT. 
class getIns64 { dag ret = !if (!eq(NumSrcArgs, 0), // VOP1 without input operands (V_NOP, V_CLREXCP) (ins), /* else */ !if (!eq(NumSrcArgs, 1), !if (HasModifiers, // VOP1 with modifiers !if(HasOMod, (ins Src0Mod:$src0_modifiers, Src0RC:$src0, clampmod0:$clamp, omod0:$omod), !if (HasClamp, (ins Src0Mod:$src0_modifiers, Src0RC:$src0, clampmod0:$clamp), (ins Src0Mod:$src0_modifiers, Src0RC:$src0))) /* else */, // VOP1 without modifiers !if (HasClamp, (ins Src0RC:$src0, clampmod0:$clamp), (ins Src0RC:$src0)) /* endif */ ), !if (!eq(NumSrcArgs, 2), !if (HasModifiers, // VOP 2 with modifiers !if(HasOMod, (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, clampmod0:$clamp, omod0:$omod), (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, clampmod0:$clamp)) /* else */, // VOP2 without modifiers !if (HasClamp, (ins Src0RC:$src0, Src1RC:$src1, clampmod0:$clamp), (ins Src0RC:$src0, Src1RC:$src1)) /* endif */ ) /* NumSrcArgs == 3 */, !if (HasModifiers, !if (HasSrc2Mods, // VOP3 with modifiers !if (HasOMod, (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, Src2Mod:$src2_modifiers, Src2RC:$src2, clampmod0:$clamp, omod0:$omod), !if (HasClamp, (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, Src2Mod:$src2_modifiers, Src2RC:$src2, clampmod0:$clamp), (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, Src2Mod:$src2_modifiers, Src2RC:$src2))), // VOP3 with modifiers except src2 !if (HasOMod, (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, Src2RC:$src2, clampmod0:$clamp, omod0:$omod), !if (HasClamp, (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, Src2RC:$src2, clampmod0:$clamp), (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, Src2RC:$src2)))) /* else */, // VOP3 without modifiers !if (HasClamp, (ins Src0RC:$src0, 
Src1RC:$src1, Src2RC:$src2, clampmod0:$clamp), (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2)) /* endif */ )))); } class getInsVOP3Base { // getInst64 handles clamp and omod. implicit mutex between vop3p and omod dag base = getIns64 .ret; dag opsel = (ins op_sel0:$op_sel); dag ret = !con(base, !if(HasOpSel, opsel, (ins))); } class getInsVOP3P { dag base = getInsVOP3Base.ret; dag vop3pOpsel = (ins op_sel_hi0:$op_sel_hi); dag vop3p_neg = (ins neg_lo0:$neg_lo, neg_hi0:$neg_hi); dag vop3pFields = !con(!if(HasOpSel, vop3pOpsel, (ins)), vop3p_neg); dag ret = !con(base, vop3pFields); } class getInsVOP3OpSel { dag ret = getInsVOP3Base.ret; } class getInsDPPBase { dag ret = !if(!eq(NumSrcArgs, 0), // VOP1 without input operands (V_NOP) (ins ), !con( !if(HasOld ,(ins OldRC:$old), (ins)), !if (!eq(NumSrcArgs, 1), !if (HasModifiers, // VOP1_DPP with modifiers (ins Src0Mod:$src0_modifiers, Src0RC:$src0) /* else */, // VOP1_DPP without modifiers (ins Src0RC:$src0) /* endif */), !if (!eq(NumSrcArgs, 2), !if (HasModifiers, // VOP2_DPP with modifiers (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1) /* else */, // VOP2_DPP without modifiers (ins Src0RC:$src0, Src1RC:$src1) ) /* NumSrcArgs == 3, VOP3 */, !if (HasModifiers, // VOP3_DPP with modifiers (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, Src2Mod:$src2_modifiers, Src2RC:$src2) /* else */, // VOP3_DPP without modifiers (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2) ) ) ) ) ); } class getInsDPP { dag ret = !con(getInsDPPBase.ret, (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask, bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)); } class getInsDPP16 { dag ret = !con(getInsDPP.ret, (ins FI:$fi)); } class getInsDPP8 { dag ret = !con(getInsDPPBase.ret, (ins dpp8:$dpp8, FI:$fi)); } class getInsVOP3DPPBase { dag old = ( ins OldRC:$old ); dag base = VOP3Base; dag ret = !con( !if(!and(HasOld,!ne(NumSrcArgs, 0)), old, (ins)), base ); } class getInsVOP3DPP { dag ret = 
!con(getInsVOP3DPPBase.ret, (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask, bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)); } class getInsVOP3DPP16 { dag ret = !con(getInsVOP3DPP.ret, (ins FI:$fi)); } class getInsVOP3DPP8 { dag ret = !con(getInsVOP3DPPBase.ret, (ins dpp8:$dpp8, FI:$fi)); } // Ins for SDWA class getInsSDWA { dag ret = !if(!eq(NumSrcArgs, 0), // VOP1 without input operands (V_NOP) (ins), !if(!eq(NumSrcArgs, 1), // VOP1 !if(!not(HasSDWAOMod), // VOP1_SDWA without omod (ins Src0Mod:$src0_modifiers, Src0RC:$src0, clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused, src0_sel:$src0_sel), // VOP1_SDWA with omod (ins Src0Mod:$src0_modifiers, Src0RC:$src0, clampmod:$clamp, omod:$omod, dst_sel:$dst_sel, dst_unused:$dst_unused, src0_sel:$src0_sel)), !if(!eq(NumSrcArgs, 2), !if(!eq(DstVT.Size, 1), // VOPC_SDWA (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel), // VOP2_SDWA !if(!not(HasSDWAOMod), // VOP2_SDWA without omod (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused, src0_sel:$src0_sel, src1_sel:$src1_sel), // VOP2_SDWA with omod (ins Src0Mod:$src0_modifiers, Src0RC:$src0, Src1Mod:$src1_modifiers, Src1RC:$src1, clampmod:$clamp, omod:$omod, dst_sel:$dst_sel, dst_unused:$dst_unused, src0_sel:$src0_sel, src1_sel:$src1_sel))), (ins)/* endif */))); } // Outs for DPP class getOutsDPP { dag ret = !if(HasDst, !if(!eq(DstVT.Size, 1), (outs), // no dst for VOPC, we use "vcc"-token as dst in SDWA VOPC instructions (outs DstRCDPP:$vdst)), (outs)); // V_NOP } // Outs for SDWA class getOutsSDWA { dag ret = !if(HasDst, !if(!eq(DstVT.Size, 1), (outs DstRCSDWA:$sdst), (outs DstRCSDWA:$vdst)), (outs)); // V_NOP } // Returns the assembly string for the inputs and outputs of a VOP[12C] // instruction. 
class getAsm32 { string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC string src0 = ", $src0"; string src1 = ", $src1"; string src2 = ", $src2"; string ret = !if(HasDst, dst, "") # !if(!eq(NumSrcArgs, 1), src0, "") # !if(!eq(NumSrcArgs, 2), src0#src1, "") # !if(!eq(NumSrcArgs, 3), src0#src1#src2, ""); } class getAsmVOPDPart { string dst = "$vdst" # XorY; string src0 = ", $src0" # XorY; string src1 = ", $vsrc1" # XorY; string ret = dst # !if(!ge(NumSrcArgs, 1), src0, "") # !if(!ge(NumSrcArgs, 2), src1, ""); } // Returns the assembly string for the inputs and outputs of a VOP3P // instruction. class getAsmVOP3P { string dst = "$vdst"; string src0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,"); string src1 = !if(!eq(NumSrcArgs, 1), "", !if(!eq(NumSrcArgs, 2), " $src1", " $src1,")); string src2 = !if(!eq(NumSrcArgs, 3), " $src2", ""); string mods = !if(HasModifiers, "$neg_lo$neg_hi", ""); string clamp = !if(HasClamp, "$clamp", ""); string opsel = !if(HasOpSel, "$op_sel$op_sel_hi", ""); // Each modifier is printed as an array of bits for each operand, so // all operands are printed as part of src0_modifiers. 
string ret = dst#", "#src0#src1#src2#opsel#mods#clamp; } class getAsmVOP3OpSel { string dst = "$vdst"; string isrc0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,"); string isrc1 = !if(!eq(NumSrcArgs, 1), "", !if(!eq(NumSrcArgs, 2), " $src1", " $src1,")); string isrc2 = !if(!eq(NumSrcArgs, 3), " $src2", ""); string fsrc0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,"); string fsrc1 = !if(!eq(NumSrcArgs, 1), "", !if(!eq(NumSrcArgs, 2), " $src1_modifiers", " $src1_modifiers,")); string fsrc2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", ""); string src0 = !if(Src0HasMods, fsrc0, isrc0); string src1 = !if(Src1HasMods, fsrc1, isrc1); string src2 = !if(Src2HasMods, fsrc2, isrc2); string clamp = !if(HasClamp, "$clamp", ""); string omod = !if(HasOMod, "$omod", ""); string ret = dst#", "#src0#src1#src2#"$op_sel"#clamp#omod; } class getAsmDPP { string dst = !if(HasDst, !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"), ""); // use $sdst for VOPC string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,"); string src1 = !if(!eq(NumSrcArgs, 1), "", !if(!eq(NumSrcArgs, 2), " $src1_modifiers", " $src1_modifiers,")); string args = !if(!not(HasModifiers), getAsm32<0, NumSrcArgs, DstVT>.ret, ", "#src0#src1); string ret = dst#args#" $dpp_ctrl$row_mask$bank_mask$bound_ctrl"; } class getAsmDPP16 { string ret = getAsmDPP.ret#"$fi"; } class getAsmDPP8 : getAsmDPP{ let ret = dst#args#" $dpp8$fi"; } class getAsmVOP3Base { string dst = !if(HasDst, !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"), ""); // use $sdst for VOPC string src0nomods = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,"); string src1nomods = !if(!eq(NumSrcArgs, 1), "", !if(!eq(NumSrcArgs, 2), " $src1", " $src1,")); string src2nomods = !if(!eq(NumSrcArgs, 3), " $src2", ""); string src0mods = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,"); string src1mods = !if(!eq(NumSrcArgs, 1), "", !if(!eq(NumSrcArgs, 2), " $src1_modifiers", " $src1_modifiers,")); string src2mods = !if(!eq(NumSrcArgs, 3), " 
// NOTE(review): throughout this chunk the angle-bracket template-parameter
// lists (e.g. "class Foo <ValueType VT>") appear to have been stripped by
// whatever produced this copy, and the file was collapsed onto a few physical
// lines so that every "//" comment swallowed the rest of its line.  The code
// tokens below are preserved exactly as found, re-flowed so comments terminate
// correctly.  Restore the parameter lists from the upstream file before
// building.

// Tail of the VOP3 asm-string builder class that begins before this chunk:
// concatenates "<dst>, <src0><src1><src2><opsel><pk-mods><clamp><omod>".
$src2_modifiers", "");
string src0 = !if(Src0HasMods, src0mods, src0nomods);
string src1 = !if(Src1HasMods, src1mods, src1nomods);
string src2 = !if(Src2HasMods, src2mods, src2nomods);
string opsel = !if(HasOpSel, "$op_sel", "");
// Packed-math (VOP3P) modifier bundle: op_sel_hi plus neg_lo/neg_hi.
string 3PMods = !if(IsVOP3P,
                    !if(HasOpSel, "$op_sel_hi", "")
                    #!if(HasModifiers, "$neg_lo$neg_hi", ""),
                    "");
string clamp = !if(HasClamp, "$clamp", "");
string omod = !if(HasOMod, "$omod", "");
string ret = dst#!if(!gt(NumSrcArgs,0),", "#src0#src1#src2#opsel#3PMods#clamp#omod, "");
}

// Asm suffix for the VOP3 form carrying full DPP controls.
class getAsmVOP3DPP {
  string ret = base # " $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
}

// As above, plus the 16-lane fetch-invalid ($fi) operand.
class getAsmVOP3DPP16 {
  string ret = getAsmVOP3DPP.ret # "$fi";
}

// Asm suffix for the compact DPP8 form (selector + $fi only).
class getAsmVOP3DPP8 {
  string ret = base # " $dpp8$fi";
}

// Builds the SDWA asm string: dst, sources with modifiers, clamp, then the
// dst_sel/dst_unused/srcN_sel selector operands.
class getAsmSDWA {
  string dst = !if(HasDst,
                   !if(!eq(DstVT.Size, 1),
                       " vcc", // use vcc token as dst for VOPC instructions
                       "$vdst"),
                   "");
  string src0 = "$src0_modifiers";
  string src1 = "$src1_modifiers";
  string args = !if(!eq(NumSrcArgs, 0),
                    "",
                    !if(!eq(NumSrcArgs, 1),
                        ", "#src0#"$clamp",
                        ", "#src0#", "#src1#"$clamp"
                    )
                );
  string sdwa = !if(!eq(NumSrcArgs, 0),
                    "",
                    !if(!eq(NumSrcArgs, 1),
                        " $dst_sel $dst_unused $src0_sel",
                        !if(!eq(DstVT.Size, 1),
                            " $src0_sel $src1_sel", // No dst_sel and dst_unused for VOPC
                            " $dst_sel $dst_unused $src0_sel $src1_sel"
                        )
                    )
                );
  string ret = dst#args#sdwa;
}

// GFX9 variant of the SDWA asm string: uses $sdst for VOPC and moves the
// output modifiers (clamp/omod) ahead of the selector operands.
class getAsmSDWA9 {
  string dst = !if(HasDst,
                   !if(!eq(DstVT.Size, 1),
                       "$sdst", // VOPC
                       "$vdst"), // VOP1/2
                   "");
  string src0 = "$src0_modifiers";
  string src1 = "$src1_modifiers";
  string out_mods = !if(!not(HasOMod), "$clamp", "$clamp$omod");
  string args = !if(!eq(NumSrcArgs, 0), "",
                    !if(!eq(NumSrcArgs, 1),
                        ", "#src0,
                        ", "#src0#", "#src1
                    )
                );
  string sdwa = !if(!eq(NumSrcArgs, 0), "",
                    !if(!eq(NumSrcArgs, 1),
                        out_mods#" $dst_sel $dst_unused $src0_sel",
                        !if(!eq(DstVT.Size, 1),
                            " $src0_sel $src1_sel", // No dst_sel, dst_unused and output modifiers for VOPC
                            out_mods#" $dst_sel $dst_unused $src0_sel $src1_sel"
                        )
                    )
                );
  string ret = dst#args#sdwa;
}

// 1 if any of dst/src0/src1 is 64-bit (and this is not a 3-source op).
class getHas64BitOps {
  bit ret =
      !if(!eq(NumSrcArgs, 3),
          0,
          !if(!eq(DstVT.Size, 64),
              1,
              !if(!eq(Src0VT.Size, 64),
                  1,
                  !if(!eq(Src1VT.Size, 64),
                      1,
                      0
                  )
              )
          )
      );
}

// Whether an SDWA form exists: not for VOP3 (3 sources) nor 64-bit operands.
class getHasSDWA {
  bit ret = !if(!eq(NumSrcArgs, 3),
                0, // NumSrcArgs == 3 - No SDWA for VOP3
                !if(!eq(DstVT.Size, 64),
                    0, // 64-bit dst - No SDWA for 64-bit operands
                    !if(!eq(Src0VT.Size, 64),
                        0, // 64-bit src0
                        !if(!eq(Src1VT.Size, 64),
                            0, // 64-bit src1
                            1
                        )
                    )
                )
            );
}

// Whether a (VOP1/VOP2-style) DPP form exists.
class getHasDPP {
  bit ret = !if(!eq(NumSrcArgs, 3),
                0, // NumSrcArgs == 3 - No DPP for VOP3
                1);
}

class getHasExt32BitDPP {
  bit ret = !and(getHasDPP.ret, !not(getHas64BitOps.ret));
}

class getHasExt64BitDPP {
  bit ret = !and(getHasDPP.ret, getHas64BitOps.ret);
}

// Function that checks if instruction supports DPP and SDWA
class getHasExt {
  bit ret = !or(getHasDPP.ret, getHasSDWA.ret);
}

// Return an AGPR+VGPR operand class for the given VGPR register class.
class getLdStRegisterOperand {
  RegisterOperand ret =
    !if(!eq(RC.Size, 32), AVLdSt_32,
    !if(!eq(RC.Size, 64), AVLdSt_64,
    !if(!eq(RC.Size, 96), AVLdSt_96,
    !if(!eq(RC.Size, 128), AVLdSt_128,
    !if(!eq(RC.Size, 160), AVLdSt_160,
    RegisterOperand // invalid register
    )))));
}

// Whether the VOP3+DPP form exists: no 64-bit dst or sources allowed.
class getHasVOP3DPP {
  bit ret = !if(!eq(DstVT.Size, 64),
                0, // 64-bit dst No DPP for 64-bit operands
                !if(!eq(Src0VT.Size, 64),
                    0, // 64-bit src0
                    !if(!eq(Src1VT.Size, 64),
                        0, // 64-bit src1
                        !if(!eq(Src2VT.Size, 64),
                            0, // 64-bit src2
                            1
                        )
                    )
                )
            );
}

// Selector for whether tablegen should emit ISel patterns for a profile.
def PatGenMode {
  int NoPattern = 0;
  int Pattern = 1;
}

// Central description of a VALU instruction shape: operand types, register
// classes per encoding (e32/e64/DPP/SDWA/VOPD), modifier availability, and
// the ins/outs/asm strings derived from them.
// NOTE(review): the declaration below is missing its opening "<list"
// (template parameter list truncated in this copy).
class VOPProfile _ArgVT, bit _EnableClamp = 0> {

  field list ArgVT = _ArgVT;
  field bit EnableClamp = _EnableClamp;
  field bit IsTrue16 = 0;
  field bit IsRealTrue16 = 0;

  field ValueType DstVT = ArgVT[0];
  field ValueType Src0VT = ArgVT[1];
  field ValueType Src1VT = ArgVT[2];
  field ValueType Src2VT = ArgVT[3];

  // Destination and source register classes, one per encoding family.
  field RegisterOperand DstRC = getVALUDstForVT.ret;
  field RegisterOperand DstRCDPP = DstRC;
  field RegisterOperand DstRC64 = DstRC;
  field RegisterOperand DstRCVOP3DPP = DstRC64;
  field RegisterOperand DstRCSDWA = getSDWADstForVT.ret;
  field RegisterOperand Src0RC32 = getVOPSrc0ForVT.ret;
  field RegisterOperand Src1RC32 = getVregSrcForVT.ret;
  field RegisterOperand Src0RC64 = getVOP3SrcForVT.ret;
  field RegisterOperand Src1RC64 = getVOP3SrcForVT.ret;
  field RegisterOperand Src2RC64 = getVOP3SrcForVT.ret;
  field RegisterOperand Src0DPP = getVregSrcForVT.ret;
  field RegisterOperand Src1DPP = getVregSrcForVT.ret;
  field RegisterOperand Src2DPP = getVregSrcForVT.ret;
  field RegisterOperand Src0VOP3DPP = VGPRSrc_32;
  field RegisterOperand Src1VOP3DPP = getVOP3DPPSrcForVT.ret;
  field RegisterOperand Src2VOP3DPP = getVOP3DPPSrcForVT.ret;
  field RegisterOperand Src0SDWA = getSDWASrcForVT.ret;
  field RegisterOperand Src1SDWA = getSDWASrcForVT.ret;

  // Source-modifier operand classes, one per encoding family.
  field Operand Src0Mod = getSrcMod.ret;
  field Operand Src1Mod = getSrcMod.ret;
  field Operand Src2Mod = getSrcMod.ret;
  field Operand Src0ModDPP = getSrcModDPP.ret;
  field Operand Src1ModDPP = getSrcModDPP.ret;
  field Operand Src2ModDPP = getSrcModDPP.ret;
  field Operand Src0ModVOP3DPP = getSrcModDPP.ret;
  field Operand Src1ModVOP3DPP = getSrcModDPP.ret;
  field Operand Src2ModVOP3DPP = getSrcModVOP3DPP.ret;
  field Operand Src0ModSDWA = getSrcModSDWA.ret;
  field Operand Src1ModSDWA = getSrcModSDWA.ret;

  // Instruction-kind flags, overridden by derived profiles.
  field bit IsMAI = 0;
  field bit IsVOP3P = 0;
  field bit IsDOT = 0;
  field bit IsSingle = 0;
  field bit IsWMMA = 0;
  field bit IsSWMMAC = 0;
  field bit IsFP8 = 0;

  field bit HasDst = !ne(DstVT.Value, untyped.Value);
  field bit HasDst32 = HasDst;
  field bit EmitDst = HasDst; // force dst encoding, see v_movreld_b32 special case
  field bit EmitDstSel = EmitDst;
  field int NumSrcArgs = getNumSrcArgs.ret;
  field bit HasSrc0 = !ne(Src0VT.Value, untyped.Value);
  field bit HasSrc1 = !ne(Src1VT.Value, untyped.Value);
  field bit HasSrc2 = !ne(Src2VT.Value, untyped.Value);

  field bit HasSrc0FloatMods = Src0VT.isFP;
  field bit HasSrc1FloatMods = Src1VT.isFP;
  field bit HasSrc2FloatMods = Src2VT.isFP;

  field bit HasSrc0IntMods = isIntType.ret;
  field bit HasSrc1IntMods = isIntType.ret;
  field bit HasSrc2IntMods = isIntType.ret;

  // Clamp/omod availability, derived from the destination type.
  field bit HasClamp = !or(isModifierType.ret, EnableClamp);
  field bit HasSDWAClamp = EmitDst;
  field bit HasFPClamp = !and(DstVT.isFP, HasClamp);
  field bit HasIntClamp = !if(DstVT.isFP, 0, HasClamp);
  field bit HasClampLo = HasClamp;
  field bit HasClampHi = !and(DstVT.isVector, HasClamp);
  field bit HasHigh = 0;

  field bit IsPacked = Src0VT.isVector;
  field bit HasOpSel = IsPacked;
  field bit HasOMod = !if(IsVOP3P, 0, DstVT.isFP);
  field bit HasSDWAOMod = DstVT.isFP;

  field bit HasModifiers = !or(isModifierType.ret,
                               isModifierType.ret,
                               isModifierType.ret,
                               HasOMod);
  field bit HasSrc0Mods = HasModifiers;
  field bit HasSrc1Mods = !if(HasModifiers, !or(HasSrc1FloatMods, HasSrc1IntMods), 0);
  field bit HasSrc2Mods = !if(HasModifiers, !or(HasSrc2FloatMods, HasSrc2IntMods), 0);

  // Which alternate encodings (DPP/SDWA variants) exist for this shape.
  field bit HasExt = getHasExt.ret;
  field bit HasExtVOP3DPP = getHasVOP3DPP.ret;
  field bit HasExtDPP = !or(getHasDPP.ret, HasExtVOP3DPP);
  field bit HasExt32BitDPP = getHasExt32BitDPP.ret;
  field bit HasExt64BitDPP = getHasExt64BitDPP.ret;
  field bit HasExtSDWA = getHasSDWA.ret;
  field bit HasExtSDWA9 = HasExtSDWA;
  field int NeedPatGen = PatGenMode.NoPattern;

  field Operand Src0PackedMod = !if(HasSrc0FloatMods, PackedF16InputMods, PackedI16InputMods);
  field Operand Src1PackedMod = !if(HasSrc1FloatMods, PackedF16InputMods, PackedI16InputMods);
  field Operand Src2PackedMod = !if(HasSrc2FloatMods, PackedF16InputMods, PackedI16InputMods);

  field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs));

  // VOP3b instructions are a special case with a second explicit
  // output. This is manually overridden for them.
  field dag Outs32 = Outs;
  field dag Outs64 = !if(HasDst,(outs DstRC64:$vdst),(outs));
  field dag OutsDPP = getOutsDPP.ret;
  field dag OutsDPP8 = OutsDPP;
  field dag OutsVOP3DPP = getOutsDPP.ret;
  field dag OutsVOP3DPP8 = OutsVOP3DPP;
  field dag OutsSDWA = getOutsSDWA.ret;

  // Input operand lists per encoding family.
  field dag Ins32 = getIns32.ret;
  field dag Ins64 = getIns64.ret;
  field dag InsVOP3P = getInsVOP3P.ret;
  // NOTE(review): the line below is visibly truncated in this copy (a
  // stray ">.ret" with no matching "<"); reconcile with upstream.
  field dag InsVOP3OpSel = getInsVOP3OpSel.ret, getOpSelMod.ret, getOpSelMod.ret>.ret;
  field dag InsDPP = !if(HasExtDPP, getInsDPP.ret, (ins));
  field dag InsDPP16 = getInsDPP16.ret;
  field dag InsDPP8 = getInsDPP8.ret;
  defvar InsVOP3DPPBase = getInsVOP3Base.ret;
  defvar InsVOP3PDPPBase = getInsVOP3P.ret;
  field dag InsVOP3Base = !if(IsVOP3P, InsVOP3PDPPBase, InsVOP3DPPBase);
  field dag InsVOP3DPP = getInsVOP3DPP.ret;
  field dag InsVOP3DPP16 = getInsVOP3DPP16.ret;
  field dag InsVOP3DPP8 = getInsVOP3DPP8.ret;
  field dag InsSDWA = getInsSDWA.ret;
  field dag InsVOPDX = (ins Src0RC32:$src0X, Src1RC32:$vsrc1X);
  // It is a slight misnomer to use the deferred f32 operand type for non-float
  // operands, but this operand type will only be used if the other dual
  // component is FMAAK or FMAMK
  field dag InsVOPDXDeferred = (ins !if(!eq(Src0VT.Size, 32), VSrc_f32_Deferred, VSrc_f16_Deferred):$src0X, VGPR_32:$vsrc1X);
  field dag InsVOPDY = (ins Src0RC32:$src0Y, Src1RC32:$vsrc1Y);
  field dag InsVOPDYDeferred = (ins !if(!eq(Src1VT.Size, 32), VSrc_f32_Deferred, VSrc_f16_Deferred):$src0Y, VGPR_32:$vsrc1Y);

  // Assembly strings per encoding family.
  field string Asm32 = getAsm32.ret;
  field string AsmDPP = !if(HasExtDPP, getAsmDPP.ret, "");
  field string AsmDPP16 = getAsmDPP16.ret;
  // DPP8 encoding has no fields for modifiers, and it is enforced by setting
  // the asm operand name via this HasModifiers flag
  field string AsmDPP8 = getAsmDPP8.ret;
  field string AsmVOP3Base = getAsmVOP3Base.ret;
  field string Asm64 = AsmVOP3Base;
  field string AsmVOP3P = getAsmVOP3P.ret;
  field string AsmVOP3OpSel = getAsmVOP3OpSel.ret;
  field string AsmVOP3DPP = getAsmVOP3DPP.ret;
  field string AsmVOP3DPP16 = getAsmVOP3DPP16.ret;
  field string AsmVOP3DPP8 = getAsmVOP3DPP8.ret;
  field string AsmSDWA = getAsmSDWA.ret;
  field string AsmSDWA9 = getAsmSDWA9.ret;
  field string AsmVOPDX = getAsmVOPDPart.ret;
  field string AsmVOPDY = getAsmVOPDPart.ret;
  field string TieRegDPP = "$old";
}

// Profile with every alternate (DPP/SDWA) encoding disabled.
class VOP_NO_EXT : VOPProfile {
  let HasExt = 0;
  let HasExtDPP = 0;
  let HasExtVOP3DPP = 0;
  let HasExt32BitDPP = 0;
  let HasExt64BitDPP = 0;
  let HasExtSDWA = 0;
  let HasExtSDWA9 = 0;
}

// Profile wrapper selecting whether ISel patterns are generated.
class VOP_PAT_GEN : VOPProfile {
  let NeedPatGen = mode;
}

// VOPC_Profile_t16, VOPC_NoSdst_Profile_t16, VOPC_Class_Profile_t16,
// VOPC_Class_NoSdst_Profile_t16, and VOP_MAC_F16_t16 do not inherit from this
// class, so copy changes to this class in those profiles
class VOPProfile_True16 : VOPProfile {
  let IsTrue16 = 1;
  let IsRealTrue16 = 1;
  // Most DstVT are 16-bit, but not all.
  let DstRC = getVALUDstForVT.ret;
  let DstRC64 = getVALUDstForVT.ret;
  let Src0RC32 = getVOPSrc0ForVT.ret;
  let Src1RC32 = getVregSrcForVT.ret;
  let Src0DPP = getVregSrcForVT.ret;
  let Src1DPP = getVregSrcForVT.ret;
  let Src2DPP = getVregSrcForVT.ret;
  let Src0ModDPP = getSrcModDPP_t16.ret;
  let Src1ModDPP = getSrcModDPP_t16.ret;
  let Src2ModDPP = getSrcModDPP_t16.ret;
  // NOTE(review): DstRC64 is assigned a second time here — with the template
  // arguments stripped the two assignments look identical; confirm intent
  // against upstream.
  let DstRC64 = getVALUDstForVT.ret;
  let Src0RC64 = getVOP3SrcForVT.ret;
  let Src1RC64 = getVOP3SrcForVT.ret;
  let Src2RC64 = getVOP3SrcForVT.ret;
  let Src0Mod = getSrcMod.ret;
  let Src1Mod = getSrcMod.ret;
  let Src2Mod = getSrcMod.ret;
}

// "Fake" true16: 16-bit ops kept in 32-bit VGPRs (IsRealTrue16 stays 0).
class VOPProfile_Fake16 : VOPProfile {
  let IsTrue16 = 1;
  // Most DstVT are 16-bit, but not all
  let DstRC = getVALUDstForVT_fake16.ret;
  let DstRC64 = getVALUDstForVT.ret;
  let Src1RC32 = getVregSrcForVT.ret;
  let Src0DPP = getVregSrcForVT.ret;
  let Src1DPP = getVregSrcForVT.ret;
  let Src2DPP = getVregSrcForVT.ret;
  let Src0ModDPP = getSrcModDPP_t16.ret;
  let Src1ModDPP = getSrcModDPP_t16.ret;
  let Src2ModDPP = getSrcModDPP_t16.ret;
}

// Concrete operand-shape profiles, named VOP_<dst>_<src0>[_<src1>[_<src2>]].
def VOP_F16_F16 : VOPProfile<[f16, f16, untyped, untyped]>;
def VOP_F16_I16 : VOPProfile <[f16, i16, untyped, untyped]>;
def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>;
def VOP_I16_I16 : VOPProfile <[i16, i16, untyped, untyped]>;

def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
def VOP_F16_F16_I32 : VOPProfile <[f16, f16, i32, untyped]>;
def VOP_I16_I16_I16 : VOPProfile <[i16, i16, i16, untyped]>;
def VOP_I16_I16_I16_ARITH : VOPProfile <[i16, i16, i16, untyped], /*EnableClamp=*/1>;

def VOP_I16_I16_I16_I16 : VOPProfile <[i16, i16, i16, i16, untyped]>;
def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>;

def VOP_I32_I16_I16_I32 : VOPProfile <[i32, i16, i16, i32, untyped]>;
def VOP_I32_I16 : VOPProfile <[i32, i16, untyped, untyped]>;
def VOP_I16_I32 : VOPProfile <[i16, i32, untyped, untyped]>;

def VOP_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, untyped]>;
def VOP_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, untyped]>;
def VOP_B32_F16_F16 : VOPProfile <[i32, f16, f16, untyped]>;

def VOP_V2F16_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, v2f16]>;
def VOP_V2I16_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, v2i16]>;
def VOP_V2I16_F32_F32 : VOPProfile <[v2i16, f32, f32, untyped]>;
def VOP_V2I16_I32_I32 : VOPProfile <[v2i16, i32, i32, untyped]>;

def VOP_F16_V2F16_V2F16_F16 : VOPProfile <[f16, v2f16, v2f16, f16]>;
def VOP_I16_V2I16_V2I16_I16 : VOPProfile <[i16, v2i16, v2i16, i16]>;
def VOP_F32_V2I16_V2I16_F32 : VOPProfile <[f32, v2i16, v2i16, f32]>;
def VOP_F32_V2F16_V2F16_V2F16 : VOPProfile <[f32, v2f16, v2f16, v2f16]>;

def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;

def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>;
def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>;
def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>;
def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>;
def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>;
def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>;
def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>;
def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>;
def VOP_F16_F32 : VOPProfile <[f16, f32, untyped, untyped]>;
def VOP_F32_F16 : VOPProfile <[f32, f16, untyped, untyped]>;
def VOP_I64_I64 : VOPProfile <[i64, i64, untyped, untyped]>;

def VOP_F32_F32_F16 : VOPProfile <[f32, f32, f16, untyped]>;
def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>;
def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>;
def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>;
def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
def VOP_I32_I32_I32_ARITH : VOPProfile <[i32, i32, i32, untyped], /*EnableClamp=*/1>;
def VOP_V2F16_F32_F32 : VOPProfile <[v2f16, f32, f32, untyped]>;
def VOP_F32_F16_F16_F16 : VOPProfile <[f32, f16, f16, f16]>;
def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
def VOP_F16_F32_F16_F32 : VOPProfile <[f16, f32, f16, f32]>;
def VOP_F32_F32_F16_F16 : VOPProfile <[f32, f32, f16, f16]>;
def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
def VOP_I32_F32_I32_I32 : VOPProfile <[i32, f32, i32, i32]>;
def VOP_I64_I64_I32_I64 : VOPProfile <[i64, i64, i32, i64]>;
def VOP_V4I32_I64_I32_V4I32 : VOPProfile <[v4i32, i64, i32, v4i32]>;
def VOP_F32_V2F16_V2F16_F32 : VOPProfile <[f32, v2f16, v2f16, f32]>;
def VOP_I32_V2I16_V2I16_I32 : VOPProfile <[i32, v2i16, v2i16, i32]>;

def VOP_V4F32_F32_F32_V4F32 : VOPProfile <[v4f32, f32, f32, v4f32]>;
// Wide (MAI/WMMA-style) operand-shape profiles, named
// VOP_<dst>_<src0>_<src1>_<src2>.
// NOTE(review): angle-bracket template-argument lists in the class
// declarations below appear stripped in this copy; restore from upstream.
def VOP_V16F32_F32_F32_V16F32 : VOPProfile <[v16f32, f32, f32, v16f32]>;
def VOP_V32F32_F32_F32_V32F32 : VOPProfile <[v32f32, f32, f32, v32f32]>;
def VOP_V4F32_V4F16_V4F16_V4F32 : VOPProfile <[v4f32, v4f16, v4f16, v4f32]>;
def VOP_V16F32_V4F16_V4F16_V16F32 : VOPProfile <[v16f32, v4f16, v4f16, v16f32]>;
def VOP_V32F32_V4F16_V4F16_V32F32 : VOPProfile <[v32f32, v4f16, v4f16, v32f32]>;
def VOP_V4F32_V2I16_V2I16_V4F32 : VOPProfile <[v4f32, v2i16, v2i16, v4f32]>;
def VOP_V16F32_V2I16_V2I16_V16F32 : VOPProfile <[v16f32, v2i16, v2i16, v16f32]>;
def VOP_V32F32_V2I16_V2I16_V32F32 : VOPProfile <[v32f32, v2i16, v2i16, v32f32]>;
def VOP_V4I32_I32_I32_V4I32 : VOPProfile <[v4i32, i32, i32, v4i32]>;
def VOP_V16I32_I32_I32_V16I32 : VOPProfile <[v16i32, i32, i32, v16i32]>;
def VOP_V32I32_I32_I32_V32I32 : VOPProfile <[v32i32, i32, i32, v32i32]>;
def VOP_V4F64_F64_F64_V4F64 : VOPProfile <[v4f64, f64, f64, v4f64]>;
def VOP_V1F64_F64_F64_V1F64 : VOPProfile <[v1f64, f64, f64, v1f64]>;
def VOP_V2F32_V2F32_V2F32_V2F32 : VOPProfile <[v2f32, v2f32, v2f32, v2f32]>;
def VOP_V2F32_V2F32_V2F32 : VOPProfile <[v2f32, v2f32, v2f32, untyped]>;
def VOP_V2I32_V2I32_V2I32 : VOPProfile <[v2i32, v2i32, v2i32, untyped]>;
def VOP_V4F32_V4I16_V4I16_V4F32 : VOPProfile <[v4f32, v4i16, v4i16, v4f32]>;
def VOP_V16F32_V4I16_V4I16_V16F32 : VOPProfile <[v16f32, v4i16, v4i16, v16f32]>;
def VOP_V32F32_V4I16_V4I16_V32F32 : VOPProfile <[v32f32, v4i16, v4i16, v32f32]>;
def VOP_V4I32_I64_I64_V4I32 : VOPProfile <[v4i32, i64, i64, v4i32]>;
def VOP_V16I32_I64_I64_V16I32 : VOPProfile <[v16i32, i64, i64, v16i32]>;
def VOP_V4F32_V2F32_V2F32_V4F32 : VOPProfile <[v4f32, v2f32, v2f32, v4f32]>;
def VOP_V16F32_V2F32_V2F32_V16F32 : VOPProfile <[v16f32, v2f32, v2f32, v16f32]>;
def VOP_V4F32_I64_I64_V4F32 : VOPProfile <[v4f32, i64, i64, v4f32]>;
def VOP_V16F32_I64_I64_V16F32 : VOPProfile <[v16f32, i64, i64, v16f32]>;

def VOP_V4F32_V4F16_V8F16_I32 : VOPProfile <[v4f32, v4f16, v8f16, i32]>;
def VOP_V16F32_V4F16_V8F16_I32 : VOPProfile <[v16f32, v4f16, v8f16, i32]>;
def VOP_V4F32_V4I16_V8I16_I32 : VOPProfile <[v4f32, v4i16, v8i16, i32]>;
def VOP_V16F32_V4I16_V8I16_I32 : VOPProfile <[v16f32, v4i16, v8i16, i32]>;
def VOP_V4I32_V2I32_V4I32_I32 : VOPProfile <[v4i32, v2i32, v4i32, i32]>;
def VOP_V16I32_V2I32_V4I32_I32 : VOPProfile <[v16i32, v2i32, v4i32, i32]>;
def VOP_V4F32_V2I32_V4I32_I32 : VOPProfile <[v4f32, v2i32, v4i32, i32]>;
def VOP_V16F32_V2I32_V4I32_I32 : VOPProfile <[v16f32, v2i32, v4i32, i32]>;

// Marker mixin pairing an opcode with its operand-swapped ("reverse") form;
// consumed by the getCommuteOrig/getCommuteRev instruction mappings below.
class Commutable_REV {
  string RevOp = revOp;
  bit IsOrig = isOrig;
}

// Marker mixin pairing an atomic with its returnless form; consumed by the
// getAtomicNoRetOp instruction mapping below.
class AtomicNoRet {
  string NoRetOp = noRetOp;
  bit IsRet = isRet;
}

//===----------------------------------------------------------------------===//
// Interpolation opcodes
//===----------------------------------------------------------------------===//

class VINTRPDstOperand : RegisterOperand ;

// Target-independent pseudo; lowered to one of the _Real encodings below.
class VINTRP_Pseudo pattern> :
  VINTRPCommon ,
  SIMCInstr {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
}

// FIXME-GFX10: WIP.
class VINTRP_Real_si op, string opName, dag outs, dag ins,
                     string asm, int encodingFamily> :
  VINTRPCommon ,
  VINTRPe ,
  SIMCInstr {
}

class VINTRP_Real_vi op, string opName, dag outs, dag ins,
                     string asm> :
  VINTRPCommon ,
  VINTRPe_vi ,
  SIMCInstr {
  let AssemblerPredicate = VIAssemblerPredicate;
  let DecoderNamespace = "GFX8";
}

// FIXME-GFX10: WIP.
// Instantiates a VINTRP pseudo plus its SI, VI and GFX10 real encodings.
// NOTE(review): the parameter list appears truncated in this copy (missing
// its opening angle bracket / "bits" type); restore from upstream.
multiclass VINTRP_m op, dag outs, dag ins, string asm,
                    list pattern = []> {
  def "" : VINTRP_Pseudo ;

  let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in {
    def _si : VINTRP_Real_si ;
  } // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7"

  def _vi : VINTRP_Real_vi ;

  // GFX10 reuses the SI encoding class with a different encoding family.
  let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
    def _gfx10 : VINTRP_Real_si;
  } // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10"
}

//===----------------------------------------------------------------------===//
// Vector instruction mappings
//===----------------------------------------------------------------------===//

// Maps an opcode in e32 form to its e64 equivalent
def getVOPe64 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["Size", "VOP3"];
  let KeyCol = ["4", "0"];
  let ValueCols = [["8", "1"]];
}

// Maps an opcode in e64 form to its e32 equivalent
def getVOPe32 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["Size", "VOP3"];
  let KeyCol = ["8", "1"];
  let ValueCols = [["4", "0"]];
}

// Maps ordinary instructions to their SDWA counterparts
def getSDWAOp : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["AsmVariantName"];
  let KeyCol = ["Default"];
  let ValueCols = [["SDWA"]];
}

// Maps SDWA instructions to their ordinary counterparts
def getBasicFromSDWAOp : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["AsmVariantName"];
  let KeyCol = ["SDWA"];
  let ValueCols = [["Default"]];
}

// Maps ordinary instructions to their DPP counterparts
def getDPPOp32 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["AsmVariantName"];
  let KeyCol = ["Default"];
  let ValueCols = [["DPP"]];
}

// Maps VOP3 instructions to their VOP3+DPP counterparts
def getDPPOp64 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["AsmVariantName"];
  let KeyCol = ["VOP3"];
  let ValueCols = [["VOP3_DPP"]];
}

// Maps a commuted opcode to its original version
def getCommuteOrig : InstrMapping {
  let FilterClass = "Commutable_REV";
  let RowFields = ["RevOp"];
  let ColFields = ["IsOrig"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Maps an original opcode to its commuted version
def getCommuteRev : InstrMapping {
  let FilterClass = "Commutable_REV";
  let RowFields = ["RevOp"];
  let ColFields = ["IsOrig"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}

// Maps a pseudo opcode to its MC (real-encoding) opcode, one column per
// SIEncodingFamily value.  NOTE(review): the "!cast(...)" calls below are
// missing their "<string>" type argument in this copy.
def getMCOpcodeGen : InstrMapping {
  let FilterClass = "SIMCInstr";
  let RowFields = ["PseudoInstr"];
  let ColFields = ["Subtarget"];
  let KeyCol = [!cast(SIEncodingFamily.NONE)];
  // These columns must be kept in sync with the SIEncodingFamily enumeration.
  let ValueCols = [[!cast(SIEncodingFamily.SI)],
                   [!cast(SIEncodingFamily.VI)],
                   [!cast(SIEncodingFamily.SDWA)],
                   [!cast(SIEncodingFamily.SDWA9)],
                   // GFX80 encoding is added to work around a multiple matching
                   // issue for buffer instructions with unpacked d16 data. This
                   // does not actually change the encoding, and thus may be
                   // removed later.
                   [!cast(SIEncodingFamily.GFX80)],
                   [!cast(SIEncodingFamily.GFX9)],
                   [!cast(SIEncodingFamily.GFX10)],
                   [!cast(SIEncodingFamily.SDWA10)],
                   [!cast(SIEncodingFamily.GFX90A)],
                   [!cast(SIEncodingFamily.GFX940)],
                   [!cast(SIEncodingFamily.GFX11)],
                   [!cast(SIEncodingFamily.GFX12)]];
}

// Get equivalent SOPK instruction.
def getSOPKOp : InstrMapping {
  let FilterClass = "SOPKInstTable";
  let RowFields = ["BaseCmpOp"];
  let ColFields = ["IsSOPK"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Maps a MUBUF opcode to its addr64 form.
def getAddr64Inst : InstrMapping {
  let FilterClass = "MUBUFAddr64Table";
  let RowFields = ["OpName"];
  let ColFields = ["IsAddr64"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Identity lookup on the addr64 column (addr64 maps to itself).
def getIfAddr64Inst : InstrMapping {
  let FilterClass = "MUBUFAddr64Table";
  let RowFields = ["OpName"];
  let ColFields = ["IsAddr64"];
  let KeyCol = ["1"];
  let ValueCols = [["1"]];
}

// Maps an atomic opcode to its returnless version.
// Maps an atomic opcode to its returnless version (see the AtomicNoRet
// marker class defined earlier in this file).
def getAtomicNoRetOp : InstrMapping {
  let FilterClass = "AtomicNoRet";
  let RowFields = ["NoRetOp"];
  let ColFields = ["IsRet"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}

// Maps a GLOBAL to its SADDR form.
def getGlobalSaddrOp : InstrMapping {
  let FilterClass = "GlobalSaddrTable";
  let RowFields = ["SaddrOp"];
  let ColFields = ["IsSaddr"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Maps a GLOBAL SADDR to its VADDR form.
def getGlobalVaddrOp : InstrMapping {
  let FilterClass = "GlobalSaddrTable";
  let RowFields = ["SaddrOp"];
  let ColFields = ["IsSaddr"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}

// Maps a v_cmpx opcode with sdst to opcode without sdst.
def getVCMPXNoSDstOp : InstrMapping {
  let FilterClass = "VCMPXNoSDstTable";
  let RowFields = ["NoSDstOp"];
  let ColFields = ["HasSDst"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}

// Maps a SOPP to a SOPP with S_NOP
def getSOPPWithRelaxation : InstrMapping {
  let FilterClass = "SOPPRelaxTable";
  let RowFields = ["KeyName"];
  let ColFields = ["IsRelaxed"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Maps flat scratch opcodes by addressing modes
def getFlatScratchInstSTfromSS : InstrMapping {
  let FilterClass = "FlatScratchInst";
  let RowFields = ["SVOp"];
  let ColFields = ["Mode"];
  let KeyCol = ["SS"];
  let ValueCols = [["ST"]];
}

def getFlatScratchInstSSfromSV : InstrMapping {
  let FilterClass = "FlatScratchInst";
  let RowFields = ["SVOp"];
  let ColFields = ["Mode"];
  let KeyCol = ["SV"];
  let ValueCols = [["SS"]];
}

def getFlatScratchInstSVfromSVS : InstrMapping {
  let FilterClass = "FlatScratchInst";
  let RowFields = ["SVOp"];
  let ColFields = ["Mode"];
  let KeyCol = ["SVS"];
  let ValueCols = [["SV"]];
}

def getFlatScratchInstSVfromSS : InstrMapping {
  let FilterClass = "FlatScratchInst";
  let RowFields = ["SVOp"];
  let ColFields = ["Mode"];
  let KeyCol = ["SS"];
  let ValueCols = [["SV"]];
}

// Maps an MFMA MAC-form opcode to its non-MAC (early-clobber) form.
def getMFMAEarlyClobberOp : InstrMapping {
  let FilterClass = "MFMATable";
  let RowFields = ["FMAOp"];
  let ColFields = ["IsMac"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}

// Maps an v_cmp instruction to its v_cmpx equivalent.
def getVCMPXOpFromVCMP : InstrMapping {
  let FilterClass = "VCMPVCMPXTable";
  let RowFields = ["VCMPOp"];
  let ColFields = ["IsVCMPX"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Table of VOPD component candidates, keyed by the base VOP opcode.
def VOPDComponentTable : GenericTable {
  let FilterClass = "VOPD_Component";
  let CppTypeName = "VOPDComponentInfo";
  let Fields = ["BaseVOP", "VOPDOp", "CanBeVOPDX"];
  let PrimaryKey = ["BaseVOP"];
  let PrimaryKeyName = "getVOPDComponentHelper";
}

// Reverse lookup: VOPD component opcode -> its base VOP entry.
def getVOPDBaseFromComponent : SearchIndex {
  let Table = VOPDComponentTable;
  let Key = ["VOPDOp"];
}

// Table of legal VOPD (dual-issue) opcode pairs.
def VOPDPairs : GenericTable {
  let FilterClass = "VOPD_Base";
  let CppTypeName = "VOPDInfo";
  let Fields = ["Opcode", "OpX", "OpY", "SubTgt"];
  let PrimaryKey = ["Opcode"];
  let PrimaryKeyName = "getVOPDOpcodeHelper";
}

// Lookup of a VOPD pair from its two component opcodes and subtarget.
def getVOPDInfoFromComponentOpcodes : SearchIndex {
  let Table = VOPDPairs;
  let Key = ["OpX", "OpY", "SubTgt"];
}

include "SIInstructions.td"

include "DSInstructions.td"
include "MIMGInstructions.td"