1//===-- SIInstrInfo.td - SI Instruction Infos -------------*- tablegen -*--===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8 9def isWave32 : Predicate<"Subtarget->getWavefrontSize() == 32">, 10 AssemblerPredicate <(all_of FeatureWavefrontSize32)>; 11def isWave64 : Predicate<"Subtarget->getWavefrontSize() == 64">, 12 AssemblerPredicate <(all_of FeatureWavefrontSize64)>; 13 14class GCNPredicateControl : PredicateControl { 15 Predicate SIAssemblerPredicate = isGFX6GFX7; 16 Predicate VIAssemblerPredicate = isGFX8GFX9; 17} 18 19// Execpt for the NONE field, this must be kept in sync with the 20// SIEncodingFamily enum in AMDGPUInstrInfo.cpp 21def SIEncodingFamily { 22 int NONE = -1; 23 int SI = 0; 24 int VI = 1; 25 int SDWA = 2; 26 int SDWA9 = 3; 27 int GFX80 = 4; 28 int GFX9 = 5; 29 int GFX10 = 6; 30 int SDWA10 = 7; 31} 32 33//===----------------------------------------------------------------------===// 34// SI DAG Nodes 35//===----------------------------------------------------------------------===// 36 37def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPUnaryOp>; 38 39def SIsbuffer_load : SDNode<"AMDGPUISD::SBUFFER_LOAD", 40 SDTypeProfile<1, 3, [SDTCisVT<1, v4i32>, SDTCisVT<2, i32>, SDTCisVT<3, i32>]>, 41 [SDNPMayLoad, SDNPMemOperand] 42>; 43 44def SIds_ordered_count : SDNode<"AMDGPUISD::DS_ORDERED_COUNT", 45 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i16>]>, 46 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain, SDNPInGlue] 47>; 48 49def SIatomic_inc : SDNode<"AMDGPUISD::ATOMIC_INC", SDTAtomic2, 50 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain] 51>; 52 53def SIatomic_dec : SDNode<"AMDGPUISD::ATOMIC_DEC", SDTAtomic2, 54 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain] 55>; 56 57def SDTAtomic2_f32 : SDTypeProfile<1, 2, [ 58 SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1> 59]>; 60 61def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32, 62 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain] 63>; 64 65def SIatomic_fmax : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMAX", SDTAtomic2_f32, 66 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain] 67>; 68 69// load_d16_{lo|hi} ptr, tied_input 70def SIload_d16 : SDTypeProfile<1, 2, [ 71 SDTCisPtrTy<1>, 72 SDTCisSameAs<0, 2> 73]>; 74 75 76def SDTtbuffer_load : SDTypeProfile<1, 8, 77 [ // vdata 78 SDTCisVT<1, v4i32>, // rsrc 79 SDTCisVT<2, i32>, // vindex(VGPR) 80 SDTCisVT<3, i32>, // voffset(VGPR) 81 SDTCisVT<4, i32>, // soffset(SGPR) 82 SDTCisVT<5, i32>, // offset(imm) 83 SDTCisVT<6, i32>, // format(imm) 84 SDTCisVT<7, i32>, // cachepolicy, swizzled buffer(imm) 85 SDTCisVT<8, i1> // idxen(imm) 86 ]>; 87 88def SItbuffer_load : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT", SDTtbuffer_load, 89 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>; 90def SItbuffer_load_d16 : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT_D16", 91 SDTtbuffer_load, 92 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>; 93 94def SDTtbuffer_store : SDTypeProfile<0, 9, 95 [ // vdata 96 SDTCisVT<1, v4i32>, // rsrc 97 SDTCisVT<2, i32>, // vindex(VGPR) 98 SDTCisVT<3, i32>, // voffset(VGPR) 99 SDTCisVT<4, i32>, // soffset(SGPR) 100 SDTCisVT<5, i32>, // offset(imm) 101 SDTCisVT<6, i32>, // format(imm) 102 SDTCisVT<7, i32>, // cachepolicy, swizzled buffer(imm) 103 SDTCisVT<8, i1> // idxen(imm) 104 ]>; 105 106def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT", SDTtbuffer_store, 107 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>; 108def SItbuffer_store_d16 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_D16", 109 SDTtbuffer_store, 110 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>; 111 112def SDTBufferLoad : SDTypeProfile<1, 7, 113 [ // vdata 114 SDTCisVT<1, v4i32>, // rsrc 115 SDTCisVT<2, i32>, // vindex(VGPR) 116 SDTCisVT<3, i32>, // voffset(VGPR) 117 SDTCisVT<4, i32>, // soffset(SGPR) 118 SDTCisVT<5, i32>, // offset(imm) 119 SDTCisVT<6, i32>, // cachepolicy, swizzled buffer(imm) 120 SDTCisVT<7, i1>]>; // idxen(imm) 121 122def SIbuffer_load : SDNode <"AMDGPUISD::BUFFER_LOAD", SDTBufferLoad, 123 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>; 124def SIbuffer_load_ubyte : SDNode <"AMDGPUISD::BUFFER_LOAD_UBYTE", SDTBufferLoad, 125 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>; 126def SIbuffer_load_ushort : SDNode <"AMDGPUISD::BUFFER_LOAD_USHORT", SDTBufferLoad, 127 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>; 128def SIbuffer_load_byte : SDNode <"AMDGPUISD::BUFFER_LOAD_BYTE", SDTBufferLoad, 129 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>; 130def SIbuffer_load_short: SDNode <"AMDGPUISD::BUFFER_LOAD_SHORT", SDTBufferLoad, 131 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>; 132def SIbuffer_load_format : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT", SDTBufferLoad, 133 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>; 134def SIbuffer_load_format_d16 : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT_D16", 135 SDTBufferLoad, 136 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>; 137 138def SDTBufferStore : SDTypeProfile<0, 8, 139 [ // vdata 140 SDTCisVT<1, v4i32>, // rsrc 141 SDTCisVT<2, i32>, // vindex(VGPR) 142 SDTCisVT<3, i32>, // voffset(VGPR) 143 SDTCisVT<4, i32>, // soffset(SGPR) 144 SDTCisVT<5, i32>, // offset(imm) 145 SDTCisVT<6, i32>, // cachepolicy, swizzled buffer(imm) 146 SDTCisVT<7, i1>]>; // idxen(imm) 147 148def SIbuffer_store : SDNode <"AMDGPUISD::BUFFER_STORE", SDTBufferStore, 149 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>; 150def SIbuffer_store_byte: SDNode <"AMDGPUISD::BUFFER_STORE_BYTE", 151 SDTBufferStore, 152 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>; 153def SIbuffer_store_short : SDNode <"AMDGPUISD::BUFFER_STORE_SHORT", 154 SDTBufferStore, 155 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>; 156def SIbuffer_store_format : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT", 157 SDTBufferStore, 158 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>; 159def SIbuffer_store_format_d16 : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT_D16", 160 SDTBufferStore, 161 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>; 162 163class SDBufferAtomic<string opcode> : SDNode <opcode, 164 SDTypeProfile<1, 8, 165 [SDTCisVT<2, v4i32>, // rsrc 166 SDTCisVT<3, i32>, // vindex(VGPR) 167 SDTCisVT<4, i32>, // voffset(VGPR) 168 SDTCisVT<5, i32>, // soffset(SGPR) 169 SDTCisVT<6, i32>, // offset(imm) 170 SDTCisVT<7, i32>, // cachepolicy(imm) 171 SDTCisVT<8, i1>]>, // idxen(imm) 172 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore] 173>; 174 175def SIbuffer_atomic_swap : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SWAP">; 176def SIbuffer_atomic_add : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_ADD">; 177def SIbuffer_atomic_sub : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SUB">; 178def SIbuffer_atomic_smin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMIN">; 179def SIbuffer_atomic_umin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMIN">; 180def SIbuffer_atomic_smax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMAX">; 181def SIbuffer_atomic_umax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMAX">; 182def SIbuffer_atomic_and : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_AND">; 183def SIbuffer_atomic_or : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_OR">; 184def SIbuffer_atomic_xor : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_XOR">; 185def SIbuffer_atomic_inc : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_INC">; 186def SIbuffer_atomic_dec : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_DEC">; 187def SIbuffer_atomic_csub : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_CSUB">; 188def SIbuffer_atomic_fadd : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_FADD">; 189 190def SIbuffer_atomic_cmpswap : SDNode <"AMDGPUISD::BUFFER_ATOMIC_CMPSWAP", 191 SDTypeProfile<1, 9, 192 [SDTCisVT<0, i32>, // dst 193 SDTCisVT<1, i32>, // src 194 SDTCisVT<2, i32>, // cmp 195 SDTCisVT<3, v4i32>, // rsrc 196 SDTCisVT<4, i32>, // vindex(VGPR) 197 SDTCisVT<5, i32>, // voffset(VGPR) 198 SDTCisVT<6, i32>, // soffset(SGPR) 199 SDTCisVT<7, i32>, // offset(imm) 200 SDTCisVT<8, i32>, // cachepolicy(imm) 201 SDTCisVT<9, i1>]>, // idxen(imm) 202 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore] 203>; 204 205class SDGlobalAtomicNoRtn<string opcode, ValueType ty> : SDNode <opcode, 206 SDTypeProfile<0, 2, 207 [SDTCisPtrTy<0>, // vaddr 208 SDTCisVT<1, ty>]>, // vdata 209 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore] 210>; 211 212def SIpc_add_rel_offset : SDNode<"AMDGPUISD::PC_ADD_REL_OFFSET", 213 SDTypeProfile<1, 2, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>, SDTCisSameAs<0,2>]> 214>; 215 216def SIlds : SDNode<"AMDGPUISD::LDS", 217 SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>]> 218>; 219 220def SIload_d16_lo : SDNode<"AMDGPUISD::LOAD_D16_LO", 221 SIload_d16, 222 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain] 223>; 224 225def SIload_d16_lo_u8 : SDNode<"AMDGPUISD::LOAD_D16_LO_U8", 226 SIload_d16, 227 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain] 228>; 229 230def SIload_d16_lo_i8 : SDNode<"AMDGPUISD::LOAD_D16_LO_I8", 231 SIload_d16, 232 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain] 233>; 234 235def SIload_d16_hi : SDNode<"AMDGPUISD::LOAD_D16_HI", 236 SIload_d16, 237 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain] 238>; 239 240def SIload_d16_hi_u8 : SDNode<"AMDGPUISD::LOAD_D16_HI_U8", 241 SIload_d16, 242 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain] 243>; 244 245def SIload_d16_hi_i8 : SDNode<"AMDGPUISD::LOAD_D16_HI_I8", 246 SIload_d16, 247 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain] 248>; 249 250def SIdenorm_mode : SDNode<"AMDGPUISD::DENORM_MODE", 251 SDTypeProfile<0 ,1, [SDTCisInt<0>]>, 252 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue] 253>; 254 255//===----------------------------------------------------------------------===// 256// ValueType helpers 257//===----------------------------------------------------------------------===// 258 259// Returns 1 if the source arguments have modifiers, 0 if they do not. 260// XXX - do f16 instructions? 261class isFloatType<ValueType SrcVT> { 262 bit ret = !or(!eq(SrcVT.Value, f16.Value), 263 !eq(SrcVT.Value, f32.Value), 264 !eq(SrcVT.Value, f64.Value), 265 !eq(SrcVT.Value, v2f16.Value), 266 !eq(SrcVT.Value, v4f16.Value), 267 !eq(SrcVT.Value, v2f32.Value), 268 !eq(SrcVT.Value, v2f64.Value)); 269} 270 271class isIntType<ValueType SrcVT> { 272 bit ret = !or(!eq(SrcVT.Value, i16.Value), 273 !eq(SrcVT.Value, i32.Value), 274 !eq(SrcVT.Value, i64.Value)); 275} 276 277class isPackedType<ValueType SrcVT> { 278 bit ret = !or(!eq(SrcVT.Value, v2i16.Value), 279 !eq(SrcVT.Value, v2f16.Value), 280 !eq(SrcVT.Value, v4f16.Value)); 281} 282 283//===----------------------------------------------------------------------===// 284// PatFrags for global memory operations 285//===----------------------------------------------------------------------===// 286 287foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in { 288let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in { 289 290 291defm atomic_inc_#as : binary_atomic_op<SIatomic_inc>; 292defm atomic_dec_#as : binary_atomic_op<SIatomic_dec>; 293defm atomic_load_fmin_#as : binary_atomic_op<SIatomic_fmin, 0>; 294defm atomic_load_fmax_#as : binary_atomic_op<SIatomic_fmax, 0>; 295 296 297} // End let AddressSpaces = ... 298} // End foreach AddrSpace 299 300 301//===----------------------------------------------------------------------===// 302// SDNodes PatFrags for loads/stores with a glue input. 303// This is for SDNodes and PatFrag for local loads and stores to 304// enable s_mov_b32 m0, -1 to be glued to the memory instructions. 305// 306// These mirror the regular load/store PatFrags and rely on special 307// processing during Select() to add the glued copy. 308// 309//===----------------------------------------------------------------------===// 310 311def AMDGPUld_glue : SDNode <"ISD::LOAD", SDTLoad, 312 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue] 313>; 314 315def AMDGPUatomic_ld_glue : SDNode <"ISD::ATOMIC_LOAD", SDTAtomicLoad, 316 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue] 317>; 318 319def unindexedload_glue : PatFrag <(ops node:$ptr), (AMDGPUld_glue node:$ptr)> { 320 let IsLoad = 1; 321 let IsUnindexed = 1; 322} 323 324def load_glue : PatFrag <(ops node:$ptr), (unindexedload_glue node:$ptr)> { 325 let IsLoad = 1; 326 let IsNonExtLoad = 1; 327} 328 329def atomic_load_32_glue : PatFrag<(ops node:$ptr), 330 (AMDGPUatomic_ld_glue node:$ptr)> { 331 let IsAtomic = 1; 332 let MemoryVT = i32; 333} 334 335def atomic_load_64_glue : PatFrag<(ops node:$ptr), 336 (AMDGPUatomic_ld_glue node:$ptr)> { 337 let IsAtomic = 1; 338 let MemoryVT = i64; 339} 340 341def extload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> { 342 let IsLoad = 1; 343 let IsAnyExtLoad = 1; 344} 345 346def sextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> { 347 let IsLoad = 1; 348 let IsSignExtLoad = 1; 349} 350 351def zextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> { 352 let IsLoad = 1; 353 let IsZeroExtLoad = 1; 354} 355 356def extloadi8_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> { 357 let IsLoad = 1; 358 let MemoryVT = i8; 359} 360 361def zextloadi8_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> { 362 let IsLoad = 1; 363 let MemoryVT = i8; 364} 365 366def extloadi16_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> { 367 let IsLoad = 1; 368 let MemoryVT = i16; 369} 370 371def zextloadi16_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> { 372 let IsLoad = 1; 373 let MemoryVT = i16; 374} 375 376def sextloadi8_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> { 377 let IsLoad = 1; 378 let MemoryVT = i8; 379} 380 381def sextloadi16_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> { 382 let IsLoad = 1; 383 let MemoryVT = i16; 384} 385 386 387let IsLoad = 1, AddressSpaces = LoadAddress_local.AddrSpaces in { 388def load_local_m0 : PatFrag<(ops node:$ptr), (load_glue node:$ptr)> { 389 let IsNonExtLoad = 1; 390} 391 392let MemoryVT = i8 in { 393def extloadi8_local_m0 : PatFrag<(ops node:$ptr), (extloadi8_glue node:$ptr)>; 394def sextloadi8_local_m0 : PatFrag<(ops node:$ptr), (sextloadi8_glue node:$ptr)>; 395def zextloadi8_local_m0 : PatFrag<(ops node:$ptr), (zextloadi8_glue node:$ptr)>; 396} 397 398let MemoryVT = i16 in { 399def extloadi16_local_m0 : PatFrag<(ops node:$ptr), (extloadi16_glue node:$ptr)>; 400def sextloadi16_local_m0 : PatFrag<(ops node:$ptr), (sextloadi16_glue node:$ptr)>; 401def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>; 402} 403 404def load_align8_local_m0 : PatFrag<(ops node:$ptr), 405 (load_local_m0 node:$ptr)>, Aligned<8> { 406 let IsLoad = 1; 407 let IsNonExtLoad = 1; 408} 409 410def load_align16_local_m0 : PatFrag<(ops node:$ptr), 411 (load_local_m0 node:$ptr)>, Aligned<16> { 412 let IsLoad = 1; 413 let IsNonExtLoad = 1; 414} 415 416} // End IsLoad = 1 417 418let IsAtomic = 1, AddressSpaces = LoadAddress_local.AddrSpaces in { 419def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr), 420 (atomic_load_32_glue node:$ptr)> { 421 let MemoryVT = i32; 422} 423def atomic_load_64_local_m0 : PatFrag<(ops node:$ptr), 424 (atomic_load_64_glue node:$ptr)> { 425 let MemoryVT = i64; 426} 427 428} // End let AddressSpaces = LoadAddress_local.AddrSpaces 429 430 431def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore, 432 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue] 433>; 434 435def AMDGPUatomic_st_glue : SDNode <"ISD::ATOMIC_STORE", SDTAtomicStore, 436 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue] 437>; 438 439def unindexedstore_glue : PatFrag<(ops node:$val, node:$ptr), 440 (AMDGPUst_glue node:$val, node:$ptr)> { 441 let IsStore = 1; 442 let IsUnindexed = 1; 443} 444 445def store_glue : PatFrag<(ops node:$val, node:$ptr), 446 (unindexedstore_glue node:$val, node:$ptr)> { 447 let IsStore = 1; 448 let IsTruncStore = 0; 449} 450 451def truncstore_glue : PatFrag<(ops node:$val, node:$ptr), 452 (unindexedstore_glue node:$val, node:$ptr)> { 453 let IsStore = 1; 454 let IsTruncStore = 1; 455} 456 457def truncstorei8_glue : PatFrag<(ops node:$val, node:$ptr), 458 (truncstore_glue node:$val, node:$ptr)> { 459 let IsStore = 1; 460 let MemoryVT = i8; 461} 462 463def truncstorei16_glue : PatFrag<(ops node:$val, node:$ptr), 464 (truncstore_glue node:$val, node:$ptr)> { 465 let IsStore = 1; 466 let MemoryVT = i16; 467} 468 469let IsStore = 1, AddressSpaces = StoreAddress_local.AddrSpaces in { 470def store_local_m0 : PatFrag<(ops node:$val, node:$ptr), 471 (store_glue node:$val, node:$ptr)> { 472 let IsStore = 1; 473 let IsTruncStore = 0; 474} 475 476def truncstorei8_local_m0 : PatFrag<(ops node:$val, node:$ptr), 477 (unindexedstore_glue node:$val, node:$ptr)> { 478 let IsStore = 1; 479 let MemoryVT = i8; 480} 481 482def truncstorei16_local_m0 : PatFrag<(ops node:$val, node:$ptr), 483 (unindexedstore_glue node:$val, node:$ptr)> { 484 let IsStore = 1; 485 let MemoryVT = i16; 486} 487} 488 489def store_align8_local_m0 : PatFrag <(ops node:$value, node:$ptr), 490 (store_local_m0 node:$value, node:$ptr)>, 491 Aligned<8> { 492 let IsStore = 1; 493 let IsTruncStore = 0; 494} 495 496def store_align16_local_m0 : PatFrag <(ops node:$value, node:$ptr), 497 (store_local_m0 node:$value, node:$ptr)>, 498 Aligned<16> { 499 let IsStore = 1; 500 let IsTruncStore = 0; 501} 502 503let AddressSpaces = StoreAddress_local.AddrSpaces in { 504 505def atomic_store_local_32_m0 : PatFrag < 506 (ops node:$value, node:$ptr), 507 (AMDGPUatomic_st_glue node:$value, node:$ptr)> { 508 let IsAtomic = 1; 509 let MemoryVT = i32; 510} 511def atomic_store_local_64_m0 : PatFrag < 512 (ops node:$value, node:$ptr), 513 (AMDGPUatomic_st_glue node:$value, node:$ptr)> { 514 let IsAtomic = 1; 515 let MemoryVT = i64; 516} 517} // End let AddressSpaces = StoreAddress_local.AddrSpaces 518 519 520def si_setcc_uniform : PatFrag < 521 (ops node:$lhs, node:$rhs, node:$cond), 522 (setcc node:$lhs, node:$rhs, node:$cond), [{ 523 for (SDNode *Use : N->uses()) { 524 if (Use->isMachineOpcode() || Use->getOpcode() != ISD::CopyToReg) 525 return false; 526 527 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg(); 528 if (Reg != AMDGPU::SCC) 529 return false; 530 } 531 return true; 532}]>; 533 534//===----------------------------------------------------------------------===// 535// SDNodes PatFrags for a16 loads and stores with 3 components. 536// v3f16/v3i16 is widened to v4f16/v4i16, so we need to match on the memory 537// load/store size. 538//===----------------------------------------------------------------------===// 539 540class mubuf_intrinsic_load<SDPatternOperator name, ValueType vt> : PatFrag < 541 (ops node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 542 node:$auxiliary, node:$idxen), 543 (name node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 544 node:$auxiliary, node:$idxen)> { 545 let IsLoad = 1; 546 let MemoryVT = vt; 547} 548 549class mubuf_intrinsic_store<SDPatternOperator name, ValueType vt> : PatFrag < 550 (ops node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 551 node:$auxiliary, node:$idxen), 552 (name node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 553 node:$auxiliary, node:$idxen)> { 554 let IsStore = 1; 555 let MemoryVT = vt; 556} 557 558class mtbuf_intrinsic_load<SDPatternOperator name, ValueType vt> : PatFrag < 559 (ops node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 560 node:$format, node:$auxiliary, node:$idxen), 561 (name node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 562 node:$format, node:$auxiliary, node:$idxen)> { 563 let IsLoad = 1; 564 let MemoryVT = vt; 565} 566 567class mtbuf_intrinsic_store<SDPatternOperator name, ValueType vt> : PatFrag < 568 (ops node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 569 node:$format, node:$auxiliary, node:$idxen), 570 (name node:$vdata, node:$rsrc, node:$vindex, node:$voffset, node:$soffset, node:$offset, 571 node:$format, node:$auxiliary, node:$idxen)> { 572 let IsStore = 1; 573 let MemoryVT = vt; 574} 575 576//===----------------------------------------------------------------------===// 577// SDNodes PatFrags for d16 loads 578//===----------------------------------------------------------------------===// 579 580class LoadD16Frag <SDPatternOperator op> : PatFrag< 581 (ops node:$ptr, node:$tied_in), 582 (op node:$ptr, node:$tied_in)> { 583 let IsLoad = 1; 584} 585 586foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in { 587let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in { 588 589def load_d16_hi_#as : LoadD16Frag <SIload_d16_hi>; 590 591def az_extloadi8_d16_hi_#as : LoadD16Frag <SIload_d16_hi_u8> { 592 let MemoryVT = i8; 593} 594 595def sextloadi8_d16_hi_#as : LoadD16Frag <SIload_d16_hi_i8> { 596 let MemoryVT = i8; 597} 598 599def load_d16_lo_#as : LoadD16Frag <SIload_d16_lo>; 600 601def az_extloadi8_d16_lo_#as : LoadD16Frag <SIload_d16_lo_u8> { 602 let MemoryVT = i8; 603} 604 605def sextloadi8_d16_lo_#as : LoadD16Frag <SIload_d16_lo_i8> { 606 let MemoryVT = i8; 607} 608 609} // End let AddressSpaces = ... 610} // End foreach AddrSpace 611 612def lshr_rev : PatFrag < 613 (ops node:$src1, node:$src0), 614 (srl $src0, $src1) 615>; 616 617def ashr_rev : PatFrag < 618 (ops node:$src1, node:$src0), 619 (sra $src0, $src1) 620>; 621 622def lshl_rev : PatFrag < 623 (ops node:$src1, node:$src0), 624 (shl $src0, $src1) 625>; 626 627def add_ctpop : PatFrag < 628 (ops node:$src0, node:$src1), 629 (add (ctpop $src0), $src1) 630>; 631 632foreach I = 1-4 in { 633def shl#I#_add : PatFrag < 634 (ops node:$src0, node:$src1), 635 (add (shl_oneuse $src0, (i32 I)), $src1)> { 636 // FIXME: Poor substitute for disabling pattern in SelectionDAG 637 let PredicateCode = [{return false;}]; 638 let GISelPredicateCode = [{return true;}]; 639} 640} 641 642multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0, 643 SDTypeProfile tc = SDTAtomic2, 644 bit IsInt = 1> { 645 646 def _glue : SDNode < 647 !if(is_amdgpu, "AMDGPUISD", "ISD")#"::ATOMIC_"#op_name, tc, 648 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue] 649 >; 650 651 let AddressSpaces = StoreAddress_local.AddrSpaces in { 652 defm _local_m0 : binary_atomic_op <!cast<SDNode>(NAME#"_glue"), IsInt>; 653 } 654 655 let AddressSpaces = StoreAddress_region.AddrSpaces in { 656 defm _region_m0 : binary_atomic_op <!cast<SDNode>(NAME#"_glue"), IsInt>; 657 } 658} 659 660defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">; 661defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">; 662defm atomic_inc : SIAtomicM0Glue2 <"INC", 1>; 663defm atomic_dec : SIAtomicM0Glue2 <"DEC", 1>; 664defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">; 665defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">; 666defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">; 667defm atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">; 668defm atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">; 669defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">; 670defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">; 671defm atomic_swap : SIAtomicM0Glue2 <"SWAP">; 672defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 0, SDTAtomic2_f32, 0>; 673defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32, 0>; 674defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32, 0>; 675 676def as_i1timm : SDNodeXForm<timm, [{ 677 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1); 678}]>; 679 680def as_i8imm : SDNodeXForm<imm, [{ 681 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8); 682}]>; 683 684def as_i8timm : SDNodeXForm<timm, [{ 685 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16); 686}]>; 687 688def as_i16imm : SDNodeXForm<imm, [{ 689 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16); 690}]>; 691 692def as_i16timm : SDNodeXForm<timm, [{ 693 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16); 694}]>; 695 696def as_i32imm: SDNodeXForm<imm, [{ 697 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32); 698}]>; 699 700def as_i32timm: SDNodeXForm<timm, [{ 701 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32); 702}]>; 703 704def as_i64imm: SDNodeXForm<imm, [{ 705 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64); 706}]>; 707 708def cond_as_i32imm: SDNodeXForm<cond, [{ 709 return CurDAG->getTargetConstant(N->get(), SDLoc(N), MVT::i32); 710}]>; 711 712// Copied from the AArch64 backend: 713def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{ 714return CurDAG->getTargetConstant( 715 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32); 716}]>; 717 718def frameindex_to_targetframeindex : SDNodeXForm<frameindex, [{ 719 auto FI = cast<FrameIndexSDNode>(N); 720 return CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32); 721}]>; 722 723// Copied from the AArch64 backend: 724def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{ 725return CurDAG->getTargetConstant( 726 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64); 727}]>; 728 729class bitextract_imm<int bitnum> : SDNodeXForm<imm, [{ 730 uint64_t Imm = N->getZExtValue(); 731 unsigned Bit = (Imm >> }] # bitnum # [{ ) & 1; 732 return CurDAG->getTargetConstant(Bit, SDLoc(N), MVT::i1); 733}]>; 734 735def SIMM16bit : ImmLeaf <i32, 736 [{return isInt<16>(Imm);}] 737>; 738 739def UIMM16bit : ImmLeaf <i32, 740 [{return isUInt<16>(Imm);}] 741>; 742 743def i64imm_32bit : ImmLeaf<i64, [{ 744 return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm); 745}]>; 746 747def InlineImm16 : ImmLeaf<i16, [{ 748 return isInlineImmediate16(Imm); 749}]>; 750 751def InlineImm32 : ImmLeaf<i32, [{ 752 return isInlineImmediate32(Imm); 753}]>; 754 755def InlineImm64 : ImmLeaf<i64, [{ 756 return isInlineImmediate64(Imm); 757}]>; 758 759def InlineImmFP32 : FPImmLeaf<f32, [{ 760 return isInlineImmediate(Imm); 761}]>; 762 763def InlineImmFP64 : FPImmLeaf<f64, [{ 764 return isInlineImmediate(Imm); 765}]>; 766 767 768class VGPRImm <dag frag> : PatLeaf<frag, [{ 769 return isVGPRImm(N); 770}]>; 771 772def NegateImm : SDNodeXForm<imm, [{ 773 return CurDAG->getConstant(-N->getSExtValue(), SDLoc(N), MVT::i32); 774}]>; 775 776// TODO: When FP inline imm values work? 777def NegSubInlineConst32 : ImmLeaf<i32, [{ 778 return Imm < -16 && Imm >= -64; 779}], NegateImm>; 780 781def NegSubInlineIntConst16 : ImmLeaf<i16, [{ 782 return Imm < -16 && Imm >= -64; 783}], NegateImm>; 784 785def ShiftAmt32Imm : ImmLeaf <i32, [{ 786 return Imm < 32; 787}]>; 788 789def getNegV2I16Imm : SDNodeXForm<build_vector, [{ 790 return SDValue(packNegConstantV2I16(N, *CurDAG), 0); 791}]>; 792 793def NegSubInlineConstV216 : PatLeaf<(build_vector), [{ 794 assert(N->getNumOperands() == 2); 795 assert(N->getOperand(0).getValueType().getSizeInBits() == 16); 796 SDValue Src0 = N->getOperand(0); 797 SDValue Src1 = N->getOperand(1); 798 if (Src0 == Src1) 799 return isNegInlineImmediate(Src0.getNode()); 800 801 return (isNullConstantOrUndef(Src0) && isNegInlineImmediate(Src1.getNode())) || 802 (isNullConstantOrUndef(Src1) && isNegInlineImmediate(Src0.getNode())); 803}], getNegV2I16Imm>; 804 805//===----------------------------------------------------------------------===// 806// MUBUF/SMEM Patterns 807//===----------------------------------------------------------------------===// 808 809def extract_glc : SDNodeXForm<timm, [{ 810 return CurDAG->getTargetConstant(N->getZExtValue() & 1, SDLoc(N), MVT::i8); 811}]>; 812 813def extract_slc : SDNodeXForm<timm, [{ 814 return CurDAG->getTargetConstant((N->getZExtValue() >> 1) & 1, SDLoc(N), MVT::i8); 815}]>; 816 817def extract_dlc : SDNodeXForm<timm, [{ 818 return CurDAG->getTargetConstant((N->getZExtValue() >> 2) & 1, SDLoc(N), MVT::i8); 819}]>; 820 821def extract_swz : SDNodeXForm<timm, [{ 822 return CurDAG->getTargetConstant((N->getZExtValue() >> 3) & 1, SDLoc(N), MVT::i8); 823}]>; 824 825//===----------------------------------------------------------------------===// 826// Custom Operands 827//===----------------------------------------------------------------------===// 828 829def SoppBrTarget : AsmOperandClass { 830 let Name = "SoppBrTarget"; 831 let ParserMethod = "parseSOppBrTarget"; 832} 833 834def sopp_brtarget : Operand<OtherVT> { 835 let EncoderMethod = "getSOPPBrEncoding"; 836 let DecoderMethod = "decodeSoppBrTarget"; 837 let OperandType = "OPERAND_PCREL"; 838 let ParserMatchClass = SoppBrTarget; 839} 840 841def si_ga : Operand<iPTR>; 842 843def InterpSlotMatchClass : AsmOperandClass { 844 let Name = "InterpSlot"; 845 let PredicateMethod = "isInterpSlot"; 846 let ParserMethod = "parseInterpSlot"; 847 let RenderMethod = "addImmOperands"; 848} 849 850def InterpSlot : Operand<i32> { 851 let PrintMethod = "printInterpSlot"; 852 let ParserMatchClass = InterpSlotMatchClass; 853 let OperandType = "OPERAND_IMMEDIATE"; 854} 855 856def AttrMatchClass : AsmOperandClass { 857 let Name = "Attr"; 858 let PredicateMethod = "isInterpAttr"; 859 let ParserMethod = "parseInterpAttr"; 860 let RenderMethod = "addImmOperands"; 861} 862 863// It appears to be necessary to create a separate operand for this to 864// be able to parse attr<num> with no space. 865def Attr : Operand<i32> { 866 let PrintMethod = "printInterpAttr"; 867 let ParserMatchClass = AttrMatchClass; 868 let OperandType = "OPERAND_IMMEDIATE"; 869} 870 871def AttrChanMatchClass : AsmOperandClass { 872 let Name = "AttrChan"; 873 let PredicateMethod = "isAttrChan"; 874 let RenderMethod = "addImmOperands"; 875} 876 877def AttrChan : Operand<i32> { 878 let PrintMethod = "printInterpAttrChan"; 879 let ParserMatchClass = AttrChanMatchClass; 880 let OperandType = "OPERAND_IMMEDIATE"; 881} 882 883def SendMsgMatchClass : AsmOperandClass { 884 let Name = "SendMsg"; 885 let PredicateMethod = "isSendMsg"; 886 let ParserMethod = "parseSendMsgOp"; 887 let RenderMethod = "addImmOperands"; 888} 889 890def SwizzleMatchClass : AsmOperandClass { 891 let Name = "Swizzle"; 892 let PredicateMethod = "isSwizzle"; 893 let ParserMethod = "parseSwizzleOp"; 894 let RenderMethod = "addImmOperands"; 895 let IsOptional = 1; 896} 897 898def EndpgmMatchClass : AsmOperandClass { 899 let Name = "EndpgmImm"; 900 let PredicateMethod = "isEndpgm"; 901 let ParserMethod = "parseEndpgmOp"; 902 let RenderMethod = "addImmOperands"; 903 let IsOptional = 1; 904} 905 906def ExpTgtMatchClass : AsmOperandClass { 907 let Name = "ExpTgt"; 908 let PredicateMethod = "isExpTgt"; 909 let ParserMethod = "parseExpTgt"; 910 let RenderMethod = "printExpTgt"; 911} 912 913def SWaitMatchClass : AsmOperandClass { 914 let Name = "SWaitCnt"; 915 let RenderMethod = "addImmOperands"; 916 let ParserMethod = "parseSWaitCntOps"; 917} 918 919def VReg32OrOffClass : AsmOperandClass { 920 let Name = "VReg32OrOff"; 921 let ParserMethod = "parseVReg32OrOff"; 922} 923 924let OperandType = "OPERAND_IMMEDIATE" in { 925def SendMsgImm : Operand<i32> { 926 let PrintMethod = "printSendMsg"; 927 let ParserMatchClass = SendMsgMatchClass; 928} 929 930def SwizzleImm : Operand<i16> { 931 let PrintMethod = "printSwizzle"; 932 let ParserMatchClass = SwizzleMatchClass; 933} 934 935def EndpgmImm : Operand<i16> { 936 let PrintMethod = "printEndpgm"; 937 let ParserMatchClass = EndpgmMatchClass; 938} 939 940def WAIT_FLAG : Operand <i32> { 941 let ParserMatchClass = SWaitMatchClass; 942 let PrintMethod = "printWaitFlag"; 943} 944} // End OperandType = "OPERAND_IMMEDIATE" 945 946include "SIInstrFormats.td" 947include "VIInstrFormats.td" 948 949def BoolReg : AsmOperandClass { 950 let Name = "BoolReg"; 951 let ParserMethod = "parseBoolReg"; 952 let RenderMethod = "addRegOperands"; 953} 954 955class BoolRC : RegisterOperand<SReg_1> { 956 let ParserMatchClass = BoolReg; 957 let DecoderMethod = "decodeBoolReg"; 958} 959 960def SSrc_i1 : RegisterOperand<SReg_1_XEXEC> { 961 let ParserMatchClass = BoolReg; 962 let DecoderMethod = "decodeBoolReg"; 963} 964 965def VOPDstS64orS32 : BoolRC { 966 let PrintMethod = "printVOPDst"; 967} 968 969// SCSrc_i1 is the operand for pseudo instructions only. 970// Boolean immediates shall not be exposed to codegen instructions. 971def SCSrc_i1 : RegisterOperand<SReg_1_XEXEC> { 972 let OperandNamespace = "AMDGPU"; 973 let OperandType = "OPERAND_REG_IMM_INT32"; 974 let ParserMatchClass = BoolReg; 975 let DecoderMethod = "decodeBoolReg"; 976} 977 978// ===----------------------------------------------------------------------===// 979// ExpSrc* Special cases for exp src operands which are printed as 980// "off" depending on en operand. 981// ===----------------------------------------------------------------------===// 982 983def ExpSrc0 : RegisterOperand<VGPR_32> { 984 let PrintMethod = "printExpSrc0"; 985 let ParserMatchClass = VReg32OrOffClass; 986} 987 988def ExpSrc1 : RegisterOperand<VGPR_32> { 989 let PrintMethod = "printExpSrc1"; 990 let ParserMatchClass = VReg32OrOffClass; 991} 992 993def ExpSrc2 : RegisterOperand<VGPR_32> { 994 let PrintMethod = "printExpSrc2"; 995 let ParserMatchClass = VReg32OrOffClass; 996} 997 998def ExpSrc3 : RegisterOperand<VGPR_32> { 999 let PrintMethod = "printExpSrc3"; 1000 let ParserMatchClass = VReg32OrOffClass; 1001} 1002 1003class SDWASrc<ValueType vt> : RegisterOperand<VS_32> { 1004 let OperandNamespace = "AMDGPU"; 1005 string Type = !if(isFloatType<vt>.ret, "FP", "INT"); 1006 let OperandType = "OPERAND_REG_INLINE_C_"#Type#vt.Size; 1007 let DecoderMethod = "decodeSDWASrc"#vt.Size; 1008 let EncoderMethod = "getSDWASrcEncoding"; 1009} 1010 1011def SDWASrc_i32 : SDWASrc<i32>; 1012def SDWASrc_i16 : SDWASrc<i16>; 1013def SDWASrc_f32 : SDWASrc<f32>; 1014def SDWASrc_f16 : SDWASrc<f16>; 1015 1016def SDWAVopcDst : BoolRC { 1017 let OperandNamespace = "AMDGPU"; 1018 let OperandType = "OPERAND_SDWA_VOPC_DST"; 1019 let EncoderMethod = "getSDWAVopcDstEncoding"; 1020 let DecoderMethod = "decodeSDWAVopcDst"; 1021 let PrintMethod = "printVOPDst"; 1022} 1023 1024class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass { 1025 let Name = "Imm"#CName; 1026 let PredicateMethod = "is"#CName; 1027 let ParserMethod = !if(Optional, "parseOptionalOperand", "parse"#CName); 1028 let RenderMethod = "addImmOperands"; 1029 let IsOptional = Optional; 1030 let DefaultMethod = !if(Optional, "default"#CName, ?); 1031} 1032 1033class NamedOperandBit<string Name, AsmOperandClass MatchClass> : Operand<i1> { 1034 let PrintMethod = "print"#Name; 1035 let ParserMatchClass = MatchClass; 1036} 1037 1038class NamedOperandBit_0<string Name, AsmOperandClass MatchClass> : 1039 OperandWithDefaultOps<i1, (ops (i1 0))> { 1040 let PrintMethod = "print"#Name; 1041 let ParserMatchClass = MatchClass; 1042} 1043 1044class NamedOperandBit_1<string Name, AsmOperandClass MatchClass> : 1045 OperandWithDefaultOps<i1, (ops (i1 1))> { 1046 let PrintMethod = "print"#Name; 1047 let ParserMatchClass = MatchClass; 1048} 1049 1050class NamedOperandU8<string Name, AsmOperandClass MatchClass> : Operand<i8> { 1051 let PrintMethod = "print"#Name; 1052 let ParserMatchClass = MatchClass; 1053} 1054 1055class NamedOperandU16<string Name, AsmOperandClass MatchClass> : Operand<i16> { 1056 let PrintMethod = "print"#Name; 1057 let ParserMatchClass = MatchClass; 1058} 1059 1060class NamedOperandU32<string Name, AsmOperandClass MatchClass> : Operand<i32> { 1061 let PrintMethod = "print"#Name; 1062 let ParserMatchClass = MatchClass; 1063} 1064 1065class NamedOperandU32_0<string Name, AsmOperandClass MatchClass> : 1066 OperandWithDefaultOps<i32, (ops (i32 0))> { 1067 let PrintMethod = "print"#Name; 1068 let ParserMatchClass = MatchClass; 1069} 1070 1071class NamedOperandU32Default0<string Name, AsmOperandClass MatchClass> : 1072 OperandWithDefaultOps<i32, (ops (i32 0))> { 1073 let PrintMethod = "print"#Name; 1074 let ParserMatchClass = MatchClass; 1075} 1076 1077let OperandType = "OPERAND_IMMEDIATE" in { 1078 1079def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>; 1080def idxen : NamedOperandBit<"Idxen", NamedMatchClass<"Idxen">>; 1081def addr64 : NamedOperandBit<"Addr64", NamedMatchClass<"Addr64">>; 1082 1083def flat_offset : NamedOperandU16<"FlatOffset", NamedMatchClass<"FlatOffset">>; 1084def offset : NamedOperandU16<"Offset", NamedMatchClass<"Offset">>; 1085def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>; 1086def offset1 : NamedOperandU8<"Offset1", NamedMatchClass<"Offset1">>; 1087 1088def gds : NamedOperandBit<"GDS", NamedMatchClass<"GDS">>; 1089 1090def omod : NamedOperandU32<"OModSI", NamedMatchClass<"OModSI">>; 1091def omod0 : NamedOperandU32_0<"OModSI", NamedMatchClass<"OModSI">>; 1092 1093// We need to make the cases with a default of 0 distinct from no 1094// default to help deal with some cases where the operand appears 1095// before a mandatory operand. 1096def clampmod : NamedOperandBit<"ClampSI", NamedMatchClass<"ClampSI">>; 1097def clampmod0 : NamedOperandBit_0<"ClampSI", NamedMatchClass<"ClampSI">>; 1098def highmod : NamedOperandBit<"High", NamedMatchClass<"High">>; 1099 1100def DLC : NamedOperandBit<"DLC", NamedMatchClass<"DLC">>; 1101def DLC_0 : NamedOperandBit_0<"DLC", NamedMatchClass<"DLC">>; 1102 1103def GLC : NamedOperandBit<"GLC", NamedMatchClass<"GLC">>; 1104def GLC_0 : NamedOperandBit_0<"GLC", NamedMatchClass<"GLC">>; 1105def GLC_1 : NamedOperandBit_1<"GLC", NamedMatchClass<"GLC_1">>; 1106 1107def SLC : NamedOperandBit<"SLC", NamedMatchClass<"SLC">>; 1108def SLC_0 : NamedOperandBit_0<"SLC", NamedMatchClass<"SLC">>; 1109 1110def TFE : NamedOperandBit<"TFE", NamedMatchClass<"TFE">>; 1111def SWZ : NamedOperandBit<"SWZ", NamedMatchClass<"SWZ">>; 1112def UNorm : NamedOperandBit<"UNorm", NamedMatchClass<"UNorm">>; 1113def DA : NamedOperandBit<"DA", NamedMatchClass<"DA">>; 1114def R128A16 : NamedOperandBit<"R128A16", NamedMatchClass<"R128A16">>; 1115def GFX10A16 : NamedOperandBit<"GFX10A16", NamedMatchClass<"GFX10A16">>; 1116def D16 : NamedOperandBit<"D16", NamedMatchClass<"D16">>; 1117def LWE : NamedOperandBit<"LWE", NamedMatchClass<"LWE">>; 1118def exp_compr : NamedOperandBit<"ExpCompr", NamedMatchClass<"ExpCompr">>; 1119def exp_vm : NamedOperandBit<"ExpVM", NamedMatchClass<"ExpVM">>; 1120 1121def FORMAT : NamedOperandU8<"FORMAT", NamedMatchClass<"FORMAT", 0>>; 1122 1123def DMask : NamedOperandU16<"DMask", NamedMatchClass<"DMask">>; 1124def Dim : NamedOperandU8<"Dim", NamedMatchClass<"Dim", 0>>; 1125 1126def dpp8 : NamedOperandU32<"DPP8", NamedMatchClass<"DPP8", 0>>; 1127 1128def dpp_ctrl : NamedOperandU32<"DPPCtrl", NamedMatchClass<"DPPCtrl", 0>>; 1129def row_mask : NamedOperandU32<"RowMask", NamedMatchClass<"RowMask">>; 1130def bank_mask : NamedOperandU32<"BankMask", NamedMatchClass<"BankMask">>; 1131def bound_ctrl : NamedOperandBit<"BoundCtrl", NamedMatchClass<"BoundCtrl">>; 1132def FI : NamedOperandU32<"FI", NamedMatchClass<"FI">>; 1133 1134def dst_sel : NamedOperandU32<"SDWADstSel", NamedMatchClass<"SDWADstSel">>; 1135def src0_sel : NamedOperandU32<"SDWASrc0Sel", NamedMatchClass<"SDWASrc0Sel">>; 1136def src1_sel : NamedOperandU32<"SDWASrc1Sel", NamedMatchClass<"SDWASrc1Sel">>; 1137def dst_unused : NamedOperandU32<"SDWADstUnused", NamedMatchClass<"SDWADstUnused">>; 1138 1139def op_sel0 : NamedOperandU32Default0<"OpSel", NamedMatchClass<"OpSel">>; 1140def op_sel_hi0 : NamedOperandU32Default0<"OpSelHi", NamedMatchClass<"OpSelHi">>; 1141def neg_lo0 : NamedOperandU32Default0<"NegLo", NamedMatchClass<"NegLo">>; 1142def neg_hi0 : NamedOperandU32Default0<"NegHi", NamedMatchClass<"NegHi">>; 1143 1144def blgp : NamedOperandU32<"BLGP", NamedMatchClass<"BLGP">>; 1145def cbsz : NamedOperandU32<"CBSZ", NamedMatchClass<"CBSZ">>; 1146def abid : NamedOperandU32<"ABID", NamedMatchClass<"ABID">>; 1147 1148def hwreg : NamedOperandU32<"Hwreg", NamedMatchClass<"Hwreg", 0>>; 1149 1150def exp_tgt : NamedOperandU32<"ExpTgt", NamedMatchClass<"ExpTgt", 0>> { 1151 1152} 1153 1154} // End OperandType = "OPERAND_IMMEDIATE" 1155 1156class KImmMatchClass<int size> : AsmOperandClass { 1157 let Name = "KImmFP"#size; 1158 let PredicateMethod = "isKImmFP"#size; 1159 let ParserMethod = "parseImm"; 1160 let RenderMethod = "addKImmFP"#size#"Operands"; 1161} 1162 1163class kimmOperand<ValueType vt> : Operand<vt> { 1164 let OperandNamespace = "AMDGPU"; 1165 let OperandType = "OPERAND_KIMM"#vt.Size; 1166 let PrintMethod = "printU"#vt.Size#"ImmOperand"; 1167 let ParserMatchClass = !cast<AsmOperandClass>("KImmFP"#vt.Size#"MatchClass"); 1168} 1169 1170// 32-bit VALU immediate operand that uses the constant bus. 1171def KImmFP32MatchClass : KImmMatchClass<32>; 1172def f32kimm : kimmOperand<i32>; 1173 1174// 32-bit VALU immediate operand with a 16-bit value that uses the 1175// constant bus. 1176def KImmFP16MatchClass : KImmMatchClass<16>; 1177def f16kimm : kimmOperand<i16>; 1178 1179class FPInputModsMatchClass <int opSize> : AsmOperandClass { 1180 let Name = "RegOrImmWithFP"#opSize#"InputMods"; 1181 let ParserMethod = "parseRegOrImmWithFPInputMods"; 1182 let PredicateMethod = "isRegOrImmWithFP"#opSize#"InputMods"; 1183} 1184 1185def FP16InputModsMatchClass : FPInputModsMatchClass<16>; 1186def FP32InputModsMatchClass : FPInputModsMatchClass<32>; 1187def FP64InputModsMatchClass : FPInputModsMatchClass<64>; 1188 1189class InputMods <AsmOperandClass matchClass> : Operand <i32> { 1190 let OperandNamespace = "AMDGPU"; 1191 let OperandType = "OPERAND_INPUT_MODS"; 1192 let ParserMatchClass = matchClass; 1193} 1194 1195class FPInputMods <FPInputModsMatchClass matchClass> : InputMods <matchClass> { 1196 let PrintMethod = "printOperandAndFPInputMods"; 1197} 1198 1199def FP16InputMods : FPInputMods<FP16InputModsMatchClass>; 1200def FP32InputMods : FPInputMods<FP32InputModsMatchClass>; 1201def FP64InputMods : FPInputMods<FP64InputModsMatchClass>; 1202 1203class IntInputModsMatchClass <int opSize> : AsmOperandClass { 1204 let Name = "RegOrImmWithInt"#opSize#"InputMods"; 1205 let ParserMethod = "parseRegOrImmWithIntInputMods"; 1206 let PredicateMethod = "isRegOrImmWithInt"#opSize#"InputMods"; 1207} 1208def Int32InputModsMatchClass : IntInputModsMatchClass<32>; 1209def Int64InputModsMatchClass : IntInputModsMatchClass<64>; 1210 1211class IntInputMods <IntInputModsMatchClass matchClass> : InputMods <matchClass> { 1212 let PrintMethod = "printOperandAndIntInputMods"; 1213} 1214def Int32InputMods : IntInputMods<Int32InputModsMatchClass>; 1215def Int64InputMods : IntInputMods<Int64InputModsMatchClass>; 1216 1217class OpSelModsMatchClass : AsmOperandClass { 1218 let Name = "OpSelMods"; 1219 let ParserMethod = "parseRegOrImm"; 1220 let PredicateMethod = "isRegOrImm"; 1221} 1222 1223def IntOpSelModsMatchClass : OpSelModsMatchClass; 1224def IntOpSelMods : InputMods<IntOpSelModsMatchClass>; 1225 1226class FPSDWAInputModsMatchClass <int opSize> : AsmOperandClass { 1227 let Name = "SDWAWithFP"#opSize#"InputMods"; 1228 let ParserMethod = "parseRegOrImmWithFPInputMods"; 1229 let PredicateMethod = "isSDWAFP"#opSize#"Operand"; 1230} 1231 1232def FP16SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<16>; 1233def FP32SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<32>; 1234 1235class FPSDWAInputMods <FPSDWAInputModsMatchClass matchClass> : 1236 InputMods <matchClass> { 1237 let PrintMethod = "printOperandAndFPInputMods"; 1238} 1239 1240def FP16SDWAInputMods : FPSDWAInputMods<FP16SDWAInputModsMatchClass>; 1241def FP32SDWAInputMods : FPSDWAInputMods<FP32SDWAInputModsMatchClass>; 1242 1243def FPVRegInputModsMatchClass : AsmOperandClass { 1244 let Name = "VRegWithFPInputMods"; 1245 let ParserMethod = "parseRegWithFPInputMods"; 1246 let PredicateMethod = "isVReg32"; 1247} 1248 1249def FPVRegInputMods : InputMods <FPVRegInputModsMatchClass> { 1250 let PrintMethod = "printOperandAndFPInputMods"; 1251} 1252 1253class IntSDWAInputModsMatchClass <int opSize> : AsmOperandClass { 1254 let Name = "SDWAWithInt"#opSize#"InputMods"; 1255 let ParserMethod = "parseRegOrImmWithIntInputMods"; 1256 let PredicateMethod = "isSDWAInt"#opSize#"Operand"; 1257} 1258 1259def Int16SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<16>; 1260def Int32SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<32>; 1261 1262class IntSDWAInputMods <IntSDWAInputModsMatchClass matchClass> : 1263 InputMods <matchClass> { 1264 let PrintMethod = "printOperandAndIntInputMods"; 1265} 1266 1267def Int16SDWAInputMods : IntSDWAInputMods<Int16SDWAInputModsMatchClass>; 1268def Int32SDWAInputMods : IntSDWAInputMods<Int32SDWAInputModsMatchClass>; 1269 1270def IntVRegInputModsMatchClass : AsmOperandClass { 1271 let Name = "VRegWithIntInputMods"; 1272 let ParserMethod = "parseRegWithIntInputMods"; 1273 let PredicateMethod = "isVReg32"; 1274} 1275 1276def IntVRegInputMods : InputMods <IntVRegInputModsMatchClass> { 1277 let PrintMethod = "printOperandAndIntInputMods"; 1278} 1279 1280class PackedFPInputModsMatchClass <int opSize> : AsmOperandClass { 1281 let Name = "PackedFP"#opSize#"InputMods"; 1282 let ParserMethod = "parseRegOrImm"; 1283 let PredicateMethod = "isRegOrImm"; 1284// let PredicateMethod = "isPackedFP"#opSize#"InputMods"; 1285} 1286 1287class PackedIntInputModsMatchClass <int opSize> : AsmOperandClass { 1288 let Name = "PackedInt"#opSize#"InputMods"; 1289 let ParserMethod = "parseRegOrImm"; 1290 let PredicateMethod = "isRegOrImm"; 1291// let PredicateMethod = "isPackedInt"#opSize#"InputMods"; 1292} 1293 1294def PackedF16InputModsMatchClass : PackedFPInputModsMatchClass<16>; 1295def PackedI16InputModsMatchClass : PackedIntInputModsMatchClass<16>; 1296 1297class PackedFPInputMods <PackedFPInputModsMatchClass matchClass> : InputMods <matchClass> { 1298// let PrintMethod = "printPackedFPInputMods"; 1299} 1300 1301class PackedIntInputMods <PackedIntInputModsMatchClass matchClass> : InputMods <matchClass> { 1302 //let PrintMethod = "printPackedIntInputMods"; 1303} 1304 1305def PackedF16InputMods : PackedFPInputMods<PackedF16InputModsMatchClass>; 1306def PackedI16InputMods : PackedIntInputMods<PackedI16InputModsMatchClass>; 1307 1308//===----------------------------------------------------------------------===// 1309// Complex patterns 1310//===----------------------------------------------------------------------===// 1311 1312def DS1Addr1Offset : ComplexPattern<i32, 2, "SelectDS1Addr1Offset">; 1313def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">; 1314def DS128Bit8ByteAligned : ComplexPattern<i64, 3, "SelectDS128Bit8ByteAligned">; 1315 1316def MOVRELOffset : ComplexPattern<i32, 2, "SelectMOVRELOffset">; 1317 1318def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">; 1319def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">; 1320def VOP3NoMods : ComplexPattern<untyped, 1, "SelectVOP3NoMods">; 1321// VOP3Mods, but the input source is known to never be NaN. 1322def VOP3Mods_nnan : ComplexPattern<fAny, 2, "SelectVOP3Mods_NNaN">; 1323 1324def VOP3OMods : ComplexPattern<untyped, 3, "SelectVOP3OMods">; 1325 1326def VOP3PMods : ComplexPattern<untyped, 2, "SelectVOP3PMods">; 1327 1328def VOP3OpSel : ComplexPattern<untyped, 2, "SelectVOP3OpSel">; 1329 1330def VOP3OpSelMods : ComplexPattern<untyped, 2, "SelectVOP3OpSelMods">; 1331 1332def VOP3PMadMixMods : ComplexPattern<untyped, 2, "SelectVOP3PMadMixMods">; 1333 1334//===----------------------------------------------------------------------===// 1335// SI assembler operands 1336//===----------------------------------------------------------------------===// 1337 1338def SIOperand { 1339 int ZERO = 0x80; 1340 int VCC = 0x6A; 1341 int FLAT_SCR = 0x68; 1342} 1343 1344// This should be kept in sync with SISrcMods enum 1345def SRCMODS { 1346 int NONE = 0; 1347 int NEG = 1; 1348 int ABS = 2; 1349 int NEG_ABS = 3; 1350 1351 int NEG_HI = ABS; 1352 int OP_SEL_0 = 4; 1353 int OP_SEL_1 = 8; 1354 int DST_OP_SEL = 8; 1355} 1356 1357def DSTCLAMP { 1358 int NONE = 0; 1359 int ENABLE = 1; 1360} 1361 1362def DSTOMOD { 1363 int NONE = 0; 1364} 1365 1366def TRAPID{ 1367 int LLVM_TRAP = 2; 1368 int LLVM_DEBUG_TRAP = 3; 1369} 1370 1371def HWREG { 1372 int MODE = 1; 1373 int STATUS = 2; 1374 int TRAPSTS = 3; 1375 int HW_ID = 4; 1376 int GPR_ALLOC = 5; 1377 int LDS_ALLOC = 6; 1378 int IB_STS = 7; 1379 int MEM_BASES = 15; 1380 int TBA_LO = 16; 1381 int TBA_HI = 17; 1382 int TMA_LO = 18; 1383 int TMA_HI = 19; 1384 int FLAT_SCR_LO = 20; 1385 int FLAT_SCR_HI = 21; 1386 int XNACK_MASK = 22; 1387 int POPS_PACKER = 25; 1388 int SHADER_CYCLES = 29; 1389} 1390 1391class getHwRegImm<int Reg, int Offset = 0, int Size = 32> { 1392 int ret = !and(!or(Reg, 1393 !shl(Offset, 6), 1394 !shl(!add(Size, -1), 11)), 65535); 1395} 1396 1397//===----------------------------------------------------------------------===// 1398// 1399// SI Instruction multiclass helpers. 1400// 1401// Instructions with _32 take 32-bit operands. 1402// Instructions with _64 take 64-bit operands. 1403// 1404// VOP_* instructions can use either a 32-bit or 64-bit encoding. The 32-bit 1405// encoding is the standard encoding, but instruction that make use of 1406// any of the instruction modifiers must use the 64-bit encoding. 1407// 1408// Instructions with _e32 use the 32-bit encoding. 1409// Instructions with _e64 use the 64-bit encoding. 1410// 1411//===----------------------------------------------------------------------===// 1412 1413class SIMCInstr <string pseudo, int subtarget> { 1414 string PseudoInstr = pseudo; 1415 int Subtarget = subtarget; 1416} 1417 1418//===----------------------------------------------------------------------===// 1419// Vector ALU classes 1420//===----------------------------------------------------------------------===// 1421 1422class getNumSrcArgs<ValueType Src0, ValueType Src1, ValueType Src2> { 1423 int ret = 1424 !if (!eq(Src0.Value, untyped.Value), 0, 1425 !if (!eq(Src1.Value, untyped.Value), 1, // VOP1 1426 !if (!eq(Src2.Value, untyped.Value), 2, // VOP2 1427 3))); // VOP3 1428} 1429 1430// Returns the register class to use for the destination of VOP[123C] 1431// instructions for the given VT. 1432class getVALUDstForVT<ValueType VT> { 1433 RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand<VGPR_32>, 1434 !if(!eq(VT.Size, 128), VOPDstOperand<VReg_128>, 1435 !if(!eq(VT.Size, 64), VOPDstOperand<VReg_64>, 1436 !if(!eq(VT.Size, 16), VOPDstOperand<VGPR_32>, 1437 VOPDstS64orS32)))); // else VT == i1 1438} 1439 1440// Returns the register class to use for the destination of VOP[12C] 1441// instructions with SDWA extension 1442class getSDWADstForVT<ValueType VT> { 1443 RegisterOperand ret = !if(!eq(VT.Size, 1), 1444 SDWAVopcDst, // VOPC 1445 VOPDstOperand<VGPR_32>); // VOP1/2 32-bit dst 1446} 1447 1448// Returns the register class to use for source 0 of VOP[12C] 1449// instructions for the given VT. 1450class getVOPSrc0ForVT<ValueType VT> { 1451 bit isFP = isFloatType<VT>.ret; 1452 1453 RegisterOperand ret = 1454 !if(isFP, 1455 !if(!eq(VT.Size, 64), 1456 VSrc_f64, 1457 !if(!eq(VT.Value, f16.Value), 1458 VSrc_f16, 1459 !if(!eq(VT.Value, v2f16.Value), 1460 VSrc_v2f16, 1461 !if(!eq(VT.Value, v4f16.Value), 1462 AVSrc_64, 1463 VSrc_f32 1464 ) 1465 ) 1466 ) 1467 ), 1468 !if(!eq(VT.Size, 64), 1469 VSrc_b64, 1470 !if(!eq(VT.Value, i16.Value), 1471 VSrc_b16, 1472 !if(!eq(VT.Value, v2i16.Value), 1473 VSrc_v2b16, 1474 VSrc_b32 1475 ) 1476 ) 1477 ) 1478 ); 1479} 1480 1481class getSOPSrcForVT<ValueType VT> { 1482 RegisterOperand ret = !if(!eq(VT.Size, 64), SSrc_b64, SSrc_b32); 1483} 1484 1485// Returns the vreg register class to use for source operand given VT 1486class getVregSrcForVT<ValueType VT> { 1487 RegisterClass ret = !if(!eq(VT.Size, 128), VReg_128, 1488 !if(!eq(VT.Size, 96), VReg_96, 1489 !if(!eq(VT.Size, 64), VReg_64, 1490 !if(!eq(VT.Size, 48), VReg_64, 1491 VGPR_32)))); 1492} 1493 1494class getSDWASrcForVT <ValueType VT> { 1495 bit isFP = isFloatType<VT>.ret; 1496 RegisterOperand retFlt = !if(!eq(VT.Size, 16), SDWASrc_f16, SDWASrc_f32); 1497 RegisterOperand retInt = !if(!eq(VT.Size, 16), SDWASrc_i16, SDWASrc_i32); 1498 RegisterOperand ret = !if(isFP, retFlt, retInt); 1499} 1500 1501// Returns the register class to use for sources of VOP3 instructions for the 1502// given VT. 1503class getVOP3SrcForVT<ValueType VT> { 1504 bit isFP = isFloatType<VT>.ret; 1505 RegisterOperand ret = 1506 !if(!eq(VT.Size, 128), 1507 VSrc_128, 1508 !if(!eq(VT.Size, 64), 1509 !if(isFP, 1510 VSrc_f64, 1511 VSrc_b64), 1512 !if(!eq(VT.Value, i1.Value), 1513 SSrc_i1, 1514 !if(isFP, 1515 !if(!eq(VT.Value, f16.Value), 1516 VSrc_f16, 1517 !if(!eq(VT.Value, v2f16.Value), 1518 VSrc_v2f16, 1519 !if(!eq(VT.Value, v4f16.Value), 1520 AVSrc_64, 1521 VSrc_f32 1522 ) 1523 ) 1524 ), 1525 !if(!eq(VT.Value, i16.Value), 1526 VSrc_b16, 1527 !if(!eq(VT.Value, v2i16.Value), 1528 VSrc_v2b16, 1529 VSrc_b32 1530 ) 1531 ) 1532 ) 1533 ) 1534 ) 1535 ); 1536} 1537 1538// Float or packed int 1539class isModifierType<ValueType SrcVT> { 1540 bit ret = !or(!eq(SrcVT.Value, f16.Value), 1541 !eq(SrcVT.Value, f32.Value), 1542 !eq(SrcVT.Value, f64.Value), 1543 !eq(SrcVT.Value, v2f16.Value), 1544 !eq(SrcVT.Value, v2i16.Value)); 1545} 1546 1547// Return type of input modifiers operand for specified input operand 1548class getSrcMod <ValueType VT, bit EnableF32SrcMods> { 1549 bit isFP = isFloatType<VT>.ret; 1550 bit isPacked = isPackedType<VT>.ret; 1551 Operand ret = !if(!eq(VT.Size, 64), 1552 !if(isFP, FP64InputMods, Int64InputMods), 1553 !if(isFP, 1554 !if(!eq(VT.Value, f16.Value), 1555 FP16InputMods, 1556 FP32InputMods 1557 ), 1558 !if(EnableF32SrcMods, FP32InputMods, Int32InputMods)) 1559 ); 1560} 1561 1562class getOpSelMod <ValueType VT> { 1563 Operand ret = !if(!eq(VT.Value, f16.Value), FP16InputMods, IntOpSelMods); 1564} 1565 1566// Return type of input modifiers operand specified input operand for DPP 1567class getSrcModDPP <ValueType VT> { 1568 bit isFP = isFloatType<VT>.ret; 1569 Operand ret = !if(isFP, FPVRegInputMods, IntVRegInputMods); 1570} 1571 1572// Return type of input modifiers operand specified input operand for SDWA 1573class getSrcModSDWA <ValueType VT> { 1574 Operand ret = !if(!eq(VT.Value, f16.Value), FP16SDWAInputMods, 1575 !if(!eq(VT.Value, f32.Value), FP32SDWAInputMods, 1576 !if(!eq(VT.Value, i16.Value), Int16SDWAInputMods, 1577 Int32SDWAInputMods))); 1578} 1579 1580// Returns the input arguments for VOP[12C] instructions for the given SrcVT. 1581class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> { 1582 dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0), // VOP1 1583 !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2 1584 (ins))); 1585} 1586 1587// Returns the input arguments for VOP3 instructions for the given SrcVT. 1588class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC, 1589 RegisterOperand Src2RC, int NumSrcArgs, 1590 bit HasClamp, bit HasModifiers, bit HasSrc2Mods, bit HasOMod, 1591 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> { 1592 1593 dag ret = 1594 !if (!eq(NumSrcArgs, 0), 1595 // VOP1 without input operands (V_NOP, V_CLREXCP) 1596 (ins), 1597 /* else */ 1598 !if (!eq(NumSrcArgs, 1), 1599 !if (HasModifiers, 1600 // VOP1 with modifiers 1601 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1602 clampmod0:$clamp, omod0:$omod) 1603 /* else */, 1604 // VOP1 without modifiers 1605 !if (HasClamp, 1606 (ins Src0RC:$src0, clampmod0:$clamp), 1607 (ins Src0RC:$src0)) 1608 /* endif */ ), 1609 !if (!eq(NumSrcArgs, 2), 1610 !if (HasModifiers, 1611 // VOP 2 with modifiers 1612 !if(HasOMod, 1613 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1614 Src1Mod:$src1_modifiers, Src1RC:$src1, 1615 clampmod0:$clamp, omod0:$omod), 1616 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1617 Src1Mod:$src1_modifiers, Src1RC:$src1, 1618 clampmod0:$clamp)) 1619 /* else */, 1620 // VOP2 without modifiers 1621 !if (HasClamp, 1622 (ins Src0RC:$src0, Src1RC:$src1, clampmod0:$clamp), 1623 (ins Src0RC:$src0, Src1RC:$src1)) 1624 1625 /* endif */ ) 1626 /* NumSrcArgs == 3 */, 1627 !if (HasModifiers, 1628 !if (HasSrc2Mods, 1629 // VOP3 with modifiers 1630 !if (HasOMod, 1631 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1632 Src1Mod:$src1_modifiers, Src1RC:$src1, 1633 Src2Mod:$src2_modifiers, Src2RC:$src2, 1634 clampmod0:$clamp, omod0:$omod), 1635 !if (HasClamp, 1636 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1637 Src1Mod:$src1_modifiers, Src1RC:$src1, 1638 Src2Mod:$src2_modifiers, Src2RC:$src2, 1639 clampmod0:$clamp), 1640 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1641 Src1Mod:$src1_modifiers, Src1RC:$src1, 1642 Src2Mod:$src2_modifiers, Src2RC:$src2))), 1643 // VOP3 with modifiers except src2 1644 !if (HasOMod, 1645 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1646 Src1Mod:$src1_modifiers, Src1RC:$src1, 1647 Src2RC:$src2, clampmod0:$clamp, omod0:$omod), 1648 !if (HasClamp, 1649 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1650 Src1Mod:$src1_modifiers, Src1RC:$src1, 1651 Src2RC:$src2, clampmod0:$clamp), 1652 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1653 Src1Mod:$src1_modifiers, Src1RC:$src1, 1654 Src2RC:$src2)))) 1655 /* else */, 1656 // VOP3 without modifiers 1657 !if (HasClamp, 1658 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2, clampmod0:$clamp), 1659 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2)) 1660 /* endif */ )))); 1661} 1662 1663class getInsVOP3Base<RegisterOperand Src0RC, RegisterOperand Src1RC, 1664 RegisterOperand Src2RC, int NumSrcArgs, 1665 bit HasClamp, bit HasModifiers, bit HasSrc2Mods, bit HasOMod, 1666 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod, bit HasOpSel, 1667 bit IsVOP3P> { 1668 // getInst64 handles clamp and omod. implicit mutex between vop3p and omod 1669 dag base = getIns64 <Src0RC, Src1RC, Src2RC, NumSrcArgs, 1670 HasClamp, HasModifiers, HasSrc2Mods, HasOMod, 1671 Src0Mod, Src1Mod, Src2Mod>.ret; 1672 dag opsel = (ins op_sel0:$op_sel); 1673 dag vop3pFields = (ins op_sel_hi0:$op_sel_hi, neg_lo0:$neg_lo, neg_hi0:$neg_hi); 1674 dag ret = !con(base, 1675 !if(HasOpSel, opsel,(ins)), 1676 !if(IsVOP3P, vop3pFields,(ins))); 1677} 1678 1679class getInsVOP3P <RegisterOperand Src0RC, RegisterOperand Src1RC, 1680 RegisterOperand Src2RC, int NumSrcArgs, bit HasClamp, 1681 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> { 1682 dag ret = getInsVOP3Base<Src0RC, Src1RC, Src2RC, NumSrcArgs, 1683 HasClamp, 1/*HasModifiers*/, 1/*HasSrc2Mods*/, 1684 0/*HasOMod*/, Src0Mod, Src1Mod, Src2Mod, 1685 1/*HasOpSel*/, 1/*IsVOP3P*/>.ret; 1686} 1687 1688class getInsVOP3OpSel <RegisterOperand Src0RC, RegisterOperand Src1RC, 1689 RegisterOperand Src2RC, int NumSrcArgs, 1690 bit HasClamp, bit HasOMod, 1691 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> { 1692 dag ret = getInsVOP3Base<Src0RC, Src1RC, 1693 Src2RC, NumSrcArgs, 1694 HasClamp, 1/*HasModifiers*/, 1/*HasSrc2Mods*/, HasOMod, 1695 Src0Mod, Src1Mod, Src2Mod, 1/*HasOpSel*/, 0>.ret; 1696} 1697 1698class getInsDPPBase <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC, 1699 int NumSrcArgs, bit HasModifiers, 1700 Operand Src0Mod, Operand Src1Mod> { 1701 1702 dag ret = !if (!eq(NumSrcArgs, 0), 1703 // VOP1 without input operands (V_NOP) 1704 (ins ), 1705 !if (!eq(NumSrcArgs, 1), 1706 !if (HasModifiers, 1707 // VOP1_DPP with modifiers 1708 (ins DstRC:$old, Src0Mod:$src0_modifiers, 1709 Src0RC:$src0) 1710 /* else */, 1711 // VOP1_DPP without modifiers 1712 (ins DstRC:$old, Src0RC:$src0) 1713 /* endif */), 1714 !if (HasModifiers, 1715 // VOP2_DPP with modifiers 1716 (ins DstRC:$old, 1717 Src0Mod:$src0_modifiers, Src0RC:$src0, 1718 Src1Mod:$src1_modifiers, Src1RC:$src1) 1719 /* else */, 1720 // VOP2_DPP without modifiers 1721 (ins DstRC:$old, 1722 Src0RC:$src0, Src1RC:$src1) 1723 ))); 1724} 1725 1726class getInsDPP <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC, 1727 int NumSrcArgs, bit HasModifiers, 1728 Operand Src0Mod, Operand Src1Mod> { 1729 dag ret = !con(getInsDPPBase<DstRC, Src0RC, Src1RC, NumSrcArgs, 1730 HasModifiers, Src0Mod, Src1Mod>.ret, 1731 (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask, 1732 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)); 1733} 1734 1735class getInsDPP16 <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC, 1736 int NumSrcArgs, bit HasModifiers, 1737 Operand Src0Mod, Operand Src1Mod> { 1738 dag ret = !con(getInsDPP<DstRC, Src0RC, Src1RC, NumSrcArgs, 1739 HasModifiers, Src0Mod, Src1Mod>.ret, 1740 (ins FI:$fi)); 1741} 1742 1743class getInsDPP8 <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC, 1744 int NumSrcArgs, bit HasModifiers, 1745 Operand Src0Mod, Operand Src1Mod> { 1746 dag ret = !con(getInsDPPBase<DstRC, Src0RC, Src1RC, NumSrcArgs, 1747 HasModifiers, Src0Mod, Src1Mod>.ret, 1748 (ins dpp8:$dpp8, FI:$fi)); 1749} 1750 1751 1752// Ins for SDWA 1753class getInsSDWA <RegisterOperand Src0RC, RegisterOperand Src1RC, int NumSrcArgs, 1754 bit HasSDWAOMod, Operand Src0Mod, Operand Src1Mod, 1755 ValueType DstVT> { 1756 1757 dag ret = !if(!eq(NumSrcArgs, 0), 1758 // VOP1 without input operands (V_NOP) 1759 (ins), 1760 !if(!eq(NumSrcArgs, 1), 1761 // VOP1 1762 !if(!not(HasSDWAOMod), 1763 // VOP1_SDWA without omod 1764 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1765 clampmod:$clamp, 1766 dst_sel:$dst_sel, dst_unused:$dst_unused, 1767 src0_sel:$src0_sel), 1768 // VOP1_SDWA with omod 1769 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1770 clampmod:$clamp, omod:$omod, 1771 dst_sel:$dst_sel, dst_unused:$dst_unused, 1772 src0_sel:$src0_sel)), 1773 !if(!eq(NumSrcArgs, 2), 1774 !if(!eq(DstVT.Size, 1), 1775 // VOPC_SDWA 1776 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1777 Src1Mod:$src1_modifiers, Src1RC:$src1, 1778 clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel), 1779 // VOP2_SDWA 1780 !if(!not(HasSDWAOMod), 1781 // VOP2_SDWA without omod 1782 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1783 Src1Mod:$src1_modifiers, Src1RC:$src1, 1784 clampmod:$clamp, 1785 dst_sel:$dst_sel, dst_unused:$dst_unused, 1786 src0_sel:$src0_sel, src1_sel:$src1_sel), 1787 // VOP2_SDWA with omod 1788 (ins Src0Mod:$src0_modifiers, Src0RC:$src0, 1789 Src1Mod:$src1_modifiers, Src1RC:$src1, 1790 clampmod:$clamp, omod:$omod, 1791 dst_sel:$dst_sel, dst_unused:$dst_unused, 1792 src0_sel:$src0_sel, src1_sel:$src1_sel))), 1793 (ins)/* endif */))); 1794} 1795 1796// Outs for DPP 1797class getOutsDPP <bit HasDst, ValueType DstVT, RegisterOperand DstRCDPP> { 1798 dag ret = !if(HasDst, 1799 !if(!eq(DstVT.Size, 1), 1800 (outs), // no dst for VOPC, we use "vcc"-token as dst in SDWA VOPC instructions 1801 (outs DstRCDPP:$vdst)), 1802 (outs)); // V_NOP 1803} 1804 1805// Outs for SDWA 1806class getOutsSDWA <bit HasDst, ValueType DstVT, RegisterOperand DstRCSDWA> { 1807 dag ret = !if(HasDst, 1808 !if(!eq(DstVT.Size, 1), 1809 (outs DstRCSDWA:$sdst), 1810 (outs DstRCSDWA:$vdst)), 1811 (outs)); // V_NOP 1812} 1813 1814// Returns the assembly string for the inputs and outputs of a VOP[12C] 1815// instruction. This does not add the _e32 suffix, so it can be reused 1816// by getAsm64. 1817class getAsm32 <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> { 1818 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC 1819 string src0 = ", $src0"; 1820 string src1 = ", $src1"; 1821 string src2 = ", $src2"; 1822 string ret = !if(HasDst, dst, "") # 1823 !if(!eq(NumSrcArgs, 1), src0, "") # 1824 !if(!eq(NumSrcArgs, 2), src0#src1, "") # 1825 !if(!eq(NumSrcArgs, 3), src0#src1#src2, ""); 1826} 1827 1828// Returns the assembly string for the inputs and outputs of a VOP3 1829// instruction. 1830class getAsm64 <bit HasDst, int NumSrcArgs, bit HasIntClamp, bit HasModifiers, 1831 bit HasOMod, ValueType DstVT = i32> { 1832 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC 1833 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,"); 1834 string src1 = !if(!eq(NumSrcArgs, 1), "", 1835 !if(!eq(NumSrcArgs, 2), " $src1_modifiers", 1836 " $src1_modifiers,")); 1837 string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", ""); 1838 string iclamp = !if(HasIntClamp, "$clamp", ""); 1839 string ret = 1840 !if(!not(HasModifiers), 1841 getAsm32<HasDst, NumSrcArgs, DstVT>.ret # iclamp, 1842 dst#", "#src0#src1#src2#"$clamp"#!if(HasOMod, "$omod", "")); 1843} 1844 1845// Returns the assembly string for the inputs and outputs of a VOP3P 1846// instruction. 1847class getAsmVOP3P <bit HasDst, int NumSrcArgs, bit HasModifiers, 1848 bit HasClamp, ValueType DstVT = i32> { 1849 string dst = " $vdst"; 1850 string src0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,"); 1851 string src1 = !if(!eq(NumSrcArgs, 1), "", 1852 !if(!eq(NumSrcArgs, 2), " $src1", 1853 " $src1,")); 1854 string src2 = !if(!eq(NumSrcArgs, 3), " $src2", ""); 1855 1856 string mods = !if(HasModifiers, "$neg_lo$neg_hi", ""); 1857 string clamp = !if(HasClamp, "$clamp", ""); 1858 1859 // Each modifier is printed as an array of bits for each operand, so 1860 // all operands are printed as part of src0_modifiers. 1861 string ret = dst#", "#src0#src1#src2#"$op_sel$op_sel_hi"#mods#clamp; 1862} 1863 1864class getAsmVOP3OpSel <int NumSrcArgs, 1865 bit HasClamp, 1866 bit HasOMod, 1867 bit Src0HasMods, 1868 bit Src1HasMods, 1869 bit Src2HasMods> { 1870 string dst = " $vdst"; 1871 1872 string isrc0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,"); 1873 string isrc1 = !if(!eq(NumSrcArgs, 1), "", 1874 !if(!eq(NumSrcArgs, 2), " $src1", 1875 " $src1,")); 1876 string isrc2 = !if(!eq(NumSrcArgs, 3), " $src2", ""); 1877 1878 string fsrc0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,"); 1879 string fsrc1 = !if(!eq(NumSrcArgs, 1), "", 1880 !if(!eq(NumSrcArgs, 2), " $src1_modifiers", 1881 " $src1_modifiers,")); 1882 string fsrc2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", ""); 1883 1884 string src0 = !if(Src0HasMods, fsrc0, isrc0); 1885 string src1 = !if(Src1HasMods, fsrc1, isrc1); 1886 string src2 = !if(Src2HasMods, fsrc2, isrc2); 1887 1888 string clamp = !if(HasClamp, "$clamp", ""); 1889 1890 string ret = dst#", "#src0#src1#src2#"$op_sel"#clamp; 1891} 1892 1893class getAsmDPP <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> { 1894 string dst = !if(HasDst, 1895 !if(!eq(DstVT.Size, 1), 1896 "$sdst", 1897 "$vdst"), 1898 ""); // use $sdst for VOPC 1899 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,"); 1900 string src1 = !if(!eq(NumSrcArgs, 1), "", 1901 !if(!eq(NumSrcArgs, 2), " $src1_modifiers", 1902 " $src1_modifiers,")); 1903 string args = !if(!not(HasModifiers), 1904 getAsm32<0, NumSrcArgs, DstVT>.ret, 1905 ", "#src0#src1); 1906 string ret = dst#args#" $dpp_ctrl$row_mask$bank_mask$bound_ctrl"; 1907} 1908 1909class getAsmDPP16 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> { 1910 string ret = getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret#"$fi"; 1911} 1912 1913class getAsmDPP8 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> 1914 : getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT> { 1915 let ret = dst#args#" $dpp8$fi"; 1916} 1917 1918 1919class getAsmSDWA <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> { 1920 string dst = !if(HasDst, 1921 !if(!eq(DstVT.Size, 1), 1922 " vcc", // use vcc token as dst for VOPC instructioins 1923 "$vdst"), 1924 ""); 1925 string src0 = "$src0_modifiers"; 1926 string src1 = "$src1_modifiers"; 1927 string args = !if(!eq(NumSrcArgs, 0), 1928 "", 1929 !if(!eq(NumSrcArgs, 1), 1930 ", "#src0#"$clamp", 1931 ", "#src0#", "#src1#"$clamp" 1932 ) 1933 ); 1934 string sdwa = !if(!eq(NumSrcArgs, 0), 1935 "", 1936 !if(!eq(NumSrcArgs, 1), 1937 " $dst_sel $dst_unused $src0_sel", 1938 !if(!eq(DstVT.Size, 1), 1939 " $src0_sel $src1_sel", // No dst_sel and dst_unused for VOPC 1940 " $dst_sel $dst_unused $src0_sel $src1_sel" 1941 ) 1942 ) 1943 ); 1944 string ret = dst#args#sdwa; 1945} 1946 1947class getAsmSDWA9 <bit HasDst, bit HasOMod, int NumSrcArgs, 1948 ValueType DstVT = i32> { 1949 string dst = !if(HasDst, 1950 !if(!eq(DstVT.Size, 1), 1951 "$sdst", // VOPC 1952 "$vdst"), // VOP1/2 1953 ""); 1954 string src0 = "$src0_modifiers"; 1955 string src1 = "$src1_modifiers"; 1956 string out_mods = !if(!not(HasOMod), "$clamp", "$clamp$omod"); 1957 string args = !if(!eq(NumSrcArgs, 0), "", 1958 !if(!eq(NumSrcArgs, 1), 1959 ", "#src0, 1960 ", "#src0#", "#src1 1961 ) 1962 ); 1963 string sdwa = !if(!eq(NumSrcArgs, 0), "", 1964 !if(!eq(NumSrcArgs, 1), 1965 out_mods#" $dst_sel $dst_unused $src0_sel", 1966 !if(!eq(DstVT.Size, 1), 1967 " $src0_sel $src1_sel", // No dst_sel, dst_unused and output modifiers for VOPC 1968 out_mods#" $dst_sel $dst_unused $src0_sel $src1_sel" 1969 ) 1970 ) 1971 ); 1972 string ret = dst#args#sdwa; 1973} 1974 1975 1976// Function that checks if instruction supports DPP and SDWA 1977class getHasExt <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32, 1978 ValueType Src1VT = i32> { 1979 bit ret = !if(!eq(NumSrcArgs, 3), 1980 0, // NumSrcArgs == 3 - No DPP or SDWA for VOP3 1981 !if(!eq(DstVT.Size, 64), 1982 0, // 64-bit dst - No DPP or SDWA for 64-bit operands 1983 !if(!eq(Src0VT.Size, 64), 1984 0, // 64-bit src0 1985 !if(!eq(Src1VT.Size, 64), 1986 0, // 64-bit src2 1987 1 1988 ) 1989 ) 1990 ) 1991 ); 1992} 1993 1994class getHasDPP <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32, 1995 ValueType Src1VT = i32> { 1996 bit ret = !if(!eq(NumSrcArgs, 0), 0, 1997 getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret); 1998} 1999 2000def PatGenMode { 2001 int NoPattern = 0; 2002 int Pattern = 1; 2003} 2004 2005class VOPProfile <list<ValueType> _ArgVT, bit _EnableF32SrcMods = 0, 2006 bit _EnableClamp = 0> { 2007 2008 field list<ValueType> ArgVT = _ArgVT; 2009 field bit EnableF32SrcMods = _EnableF32SrcMods; 2010 field bit EnableClamp = _EnableClamp; 2011 2012 field ValueType DstVT = ArgVT[0]; 2013 field ValueType Src0VT = ArgVT[1]; 2014 field ValueType Src1VT = ArgVT[2]; 2015 field ValueType Src2VT = ArgVT[3]; 2016 field RegisterOperand DstRC = getVALUDstForVT<DstVT>.ret; 2017 field RegisterOperand DstRCDPP = getVALUDstForVT<DstVT>.ret; 2018 field RegisterOperand DstRCSDWA = getSDWADstForVT<DstVT>.ret; 2019 field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret; 2020 field RegisterClass Src1RC32 = getVregSrcForVT<Src1VT>.ret; 2021 field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret; 2022 field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret; 2023 field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret; 2024 field RegisterClass Src0DPP = getVregSrcForVT<Src0VT>.ret; 2025 field RegisterClass Src1DPP = getVregSrcForVT<Src1VT>.ret; 2026 field RegisterOperand Src0SDWA = getSDWASrcForVT<Src0VT>.ret; 2027 field RegisterOperand Src1SDWA = getSDWASrcForVT<Src0VT>.ret; 2028 field Operand Src0Mod = getSrcMod<Src0VT, EnableF32SrcMods>.ret; 2029 field Operand Src1Mod = getSrcMod<Src1VT, EnableF32SrcMods>.ret; 2030 field Operand Src2Mod = getSrcMod<Src2VT, EnableF32SrcMods>.ret; 2031 field Operand Src0ModDPP = getSrcModDPP<Src0VT>.ret; 2032 field Operand Src1ModDPP = getSrcModDPP<Src1VT>.ret; 2033 field Operand Src0ModSDWA = getSrcModSDWA<Src0VT>.ret; 2034 field Operand Src1ModSDWA = getSrcModSDWA<Src1VT>.ret; 2035 2036 2037 field bit HasDst = !ne(DstVT.Value, untyped.Value); 2038 field bit HasDst32 = HasDst; 2039 field bit EmitDst = HasDst; // force dst encoding, see v_movreld_b32 special case 2040 field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret; 2041 field bit HasSrc0 = !ne(Src0VT.Value, untyped.Value); 2042 field bit HasSrc1 = !ne(Src1VT.Value, untyped.Value); 2043 field bit HasSrc2 = !ne(Src2VT.Value, untyped.Value); 2044 2045 // HasSrc*FloatMods affects the SDWA encoding. We ignore EnableF32SrcMods. 2046 field bit HasSrc0FloatMods = isFloatType<Src0VT>.ret; 2047 field bit HasSrc1FloatMods = isFloatType<Src1VT>.ret; 2048 field bit HasSrc2FloatMods = isFloatType<Src2VT>.ret; 2049 2050 // HasSrc*IntMods affects the SDWA encoding. We ignore EnableF32SrcMods. 2051 field bit HasSrc0IntMods = isIntType<Src0VT>.ret; 2052 field bit HasSrc1IntMods = isIntType<Src1VT>.ret; 2053 field bit HasSrc2IntMods = isIntType<Src2VT>.ret; 2054 2055 field bit HasClamp = !or(isModifierType<Src0VT>.ret, EnableClamp); 2056 field bit HasSDWAClamp = EmitDst; 2057 field bit HasFPClamp = !and(isFloatType<DstVT>.ret, HasClamp); 2058 field bit HasIntClamp = !if(isFloatType<DstVT>.ret, 0, HasClamp); 2059 field bit HasClampLo = HasClamp; 2060 field bit HasClampHi = !and(isPackedType<DstVT>.ret, HasClamp); 2061 field bit HasHigh = 0; 2062 2063 field bit IsPacked = isPackedType<Src0VT>.ret; 2064 field bit HasOpSel = IsPacked; 2065 field bit HasOMod = !if(HasOpSel, 0, isFloatType<DstVT>.ret); 2066 field bit HasSDWAOMod = isFloatType<DstVT>.ret; 2067 2068 field bit HasModifiers = !or(isModifierType<Src0VT>.ret, 2069 isModifierType<Src1VT>.ret, 2070 isModifierType<Src2VT>.ret, 2071 HasOMod, 2072 EnableF32SrcMods); 2073 2074 field bit HasSrc0Mods = HasModifiers; 2075 field bit HasSrc1Mods = !if(HasModifiers, !or(HasSrc1FloatMods, HasSrc1IntMods), 0); 2076 field bit HasSrc2Mods = !if(HasModifiers, !or(HasSrc2FloatMods, HasSrc2IntMods), 0); 2077 2078 field bit HasExt = getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret; 2079 field bit HasExtDPP = getHasDPP<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret; 2080 field bit HasExtSDWA = HasExt; 2081 field bit HasExtSDWA9 = HasExt; 2082 field int NeedPatGen = PatGenMode.NoPattern; 2083 2084 field bit IsMAI = 0; 2085 field bit IsDOT = 0; 2086 2087 field Operand Src0PackedMod = !if(HasSrc0FloatMods, PackedF16InputMods, PackedI16InputMods); 2088 field Operand Src1PackedMod = !if(HasSrc1FloatMods, PackedF16InputMods, PackedI16InputMods); 2089 field Operand Src2PackedMod = !if(HasSrc2FloatMods, PackedF16InputMods, PackedI16InputMods); 2090 2091 field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs)); 2092 2093 // VOP3b instructions are a special case with a second explicit 2094 // output. This is manually overridden for them. 2095 field dag Outs32 = Outs; 2096 field dag Outs64 = Outs; 2097 field dag OutsDPP = getOutsDPP<HasDst, DstVT, DstRCDPP>.ret; 2098 field dag OutsDPP8 = getOutsDPP<HasDst, DstVT, DstRCDPP>.ret; 2099 field dag OutsSDWA = getOutsSDWA<HasDst, DstVT, DstRCSDWA>.ret; 2100 2101 field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret; 2102 field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs, 2103 HasIntClamp, HasModifiers, HasSrc2Mods, 2104 HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret; 2105 field dag InsVOP3P = getInsVOP3P<Src0RC64, Src1RC64, Src2RC64, 2106 NumSrcArgs, HasClamp, 2107 Src0PackedMod, Src1PackedMod, Src2PackedMod>.ret; 2108 field dag InsVOP3OpSel = getInsVOP3OpSel<Src0RC64, Src1RC64, Src2RC64, 2109 NumSrcArgs, HasClamp, HasOMod, 2110 getOpSelMod<Src0VT>.ret, 2111 getOpSelMod<Src1VT>.ret, 2112 getOpSelMod<Src2VT>.ret>.ret; 2113 field dag InsDPP = !if(HasExtDPP, 2114 getInsDPP<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs, 2115 HasModifiers, Src0ModDPP, Src1ModDPP>.ret, 2116 (ins)); 2117 field dag InsDPP16 = getInsDPP16<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs, 2118 HasModifiers, Src0ModDPP, Src1ModDPP>.ret; 2119 field dag InsDPP8 = getInsDPP8<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs, 0, 2120 Src0ModDPP, Src1ModDPP>.ret; 2121 field dag InsSDWA = getInsSDWA<Src0SDWA, Src1SDWA, NumSrcArgs, 2122 HasSDWAOMod, Src0ModSDWA, Src1ModSDWA, 2123 DstVT>.ret; 2124 2125 2126 field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret; 2127 field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasIntClamp, HasModifiers, HasOMod, DstVT>.ret; 2128 field string AsmVOP3P = getAsmVOP3P<HasDst, NumSrcArgs, HasModifiers, HasClamp, DstVT>.ret; 2129 field string AsmVOP3OpSel = getAsmVOP3OpSel<NumSrcArgs, 2130 HasClamp, HasOMod, 2131 HasSrc0FloatMods, 2132 HasSrc1FloatMods, 2133 HasSrc2FloatMods>.ret; 2134 field string AsmDPP = !if(HasExtDPP, 2135 getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret, ""); 2136 field string AsmDPP16 = getAsmDPP16<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret; 2137 field string AsmDPP8 = getAsmDPP8<HasDst, NumSrcArgs, 0, DstVT>.ret; 2138 field string AsmSDWA = getAsmSDWA<HasDst, NumSrcArgs, DstVT>.ret; 2139 field string AsmSDWA9 = getAsmSDWA9<HasDst, HasSDWAOMod, NumSrcArgs, DstVT>.ret; 2140 2141 field string TieRegDPP = "$old"; 2142} 2143 2144class VOP_NO_EXT <VOPProfile p> : VOPProfile <p.ArgVT> { 2145 let HasExt = 0; 2146 let HasExtDPP = 0; 2147 let HasExtSDWA = 0; 2148 let HasExtSDWA9 = 0; 2149} 2150 2151class VOP_PAT_GEN <VOPProfile p, int mode=PatGenMode.Pattern> : VOPProfile <p.ArgVT> { 2152 let NeedPatGen = mode; 2153} 2154 2155def VOP_F16_F16 : VOPProfile <[f16, f16, untyped, untyped]>; 2156def VOP_F16_I16 : VOPProfile <[f16, i16, untyped, untyped]>; 2157def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>; 2158 2159def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>; 2160def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>; 2161def VOP_F16_F16_I32 : VOPProfile <[f16, f16, i32, untyped]>; 2162def VOP_I16_I16_I16 : VOPProfile <[i16, i16, i16, untyped]>; 2163def VOP_I16_I16_I16_ARITH : VOPProfile <[i16, i16, i16, untyped], 0, /*EnableClamp=*/1>; 2164 2165def VOP_I16_I16_I16_I16 : VOPProfile <[i16, i16, i16, i16, untyped]>; 2166def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>; 2167 2168def VOP_I32_I16_I16_I32 : VOPProfile <[i32, i16, i16, i32, untyped]>; 2169 2170def VOP_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, untyped]>; 2171def VOP_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, untyped]>; 2172def VOP_B32_F16_F16 : VOPProfile <[i32, f16, f16, untyped]>; 2173 2174def VOP_V2F16_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, v2f16]>; 2175def VOP_V2I16_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, v2i16]>; 2176def VOP_V2I16_F32_F32 : VOPProfile <[v2i16, f32, f32, untyped]>; 2177def VOP_V2I16_I32_I32 : VOPProfile <[v2i16, i32, i32, untyped]>; 2178 2179def VOP_F32_V2F16_V2F16_V2F16 : VOPProfile <[f32, v2f16, v2f16, v2f16]>; 2180 2181def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>; 2182 2183def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>; 2184def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>; 2185def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>; 2186def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>; 2187def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>; 2188def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>; 2189def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>; 2190def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>; 2191def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>; 2192def VOP_F16_F32 : VOPProfile <[f16, f32, untyped, untyped]>; 2193def VOP_F32_F16 : VOPProfile <[f32, f16, untyped, untyped]>; 2194 2195def VOP_F32_F32_F16 : VOPProfile <[f32, f32, f16, untyped]>; 2196def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>; 2197def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>; 2198def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>; 2199def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>; 2200def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>; 2201def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>; 2202def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>; 2203def VOP_I32_I32_I32_ARITH : VOPProfile <[i32, i32, i32, untyped], 0, /*EnableClamp=*/1>; 2204def VOP_V2F16_F32_F32 : VOPProfile <[v2f16, f32, f32, untyped]>; 2205def VOP_F32_F16_F16_F16 : VOPProfile <[f32, f16, f16, f16]>; 2206 2207def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>; 2208def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>; 2209def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>; 2210 2211def VOP_F16_F32_F16_F32 : VOPProfile <[f16, f32, f16, f32]>; 2212def VOP_F32_F32_F16_F16 : VOPProfile <[f32, f32, f16, f16]>; 2213def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>; 2214def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>; 2215def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>; 2216def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>; 2217def VOP_I32_F32_I32_I32 : VOPProfile <[i32, f32, i32, i32]>; 2218def VOP_I64_I64_I32_I64 : VOPProfile <[i64, i64, i32, i64]>; 2219def VOP_V4I32_I64_I32_V4I32 : VOPProfile <[v4i32, i64, i32, v4i32]>; 2220 2221def VOP_F32_V2F16_V2F16_F32 : VOPProfile <[f32, v2f16, v2f16, f32]>; 2222def VOP_I32_V2I16_V2I16_I32 : VOPProfile <[i32, v2i16, v2i16, i32]>; 2223 2224def VOP_V4F32_F32_F32_V4F32 : VOPProfile <[v4f32, f32, f32, v4f32]>; 2225def VOP_V16F32_F32_F32_V16F32 : VOPProfile <[v16f32, f32, f32, v16f32]>; 2226def VOP_V32F32_F32_F32_V32F32 : VOPProfile <[v32f32, f32, f32, v32f32]>; 2227def VOP_V4F32_V4F16_V4F16_V4F32 : VOPProfile <[v4f32, v4f16, v4f16, v4f32]>; 2228def VOP_V16F32_V4F16_V4F16_V16F32 : VOPProfile <[v16f32, v4f16, v4f16, v16f32]>; 2229def VOP_V32F32_V4F16_V4F16_V32F32 : VOPProfile <[v32f32, v4f16, v4f16, v32f32]>; 2230def VOP_V4F32_V2I16_V2I16_V4F32 : VOPProfile <[v4f32, v2i16, v2i16, v4f32]>; 2231def VOP_V16F32_V2I16_V2I16_V16F32 : VOPProfile <[v16f32, v2i16, v2i16, v16f32]>; 2232def VOP_V32F32_V2I16_V2I16_V32F32 : VOPProfile <[v32f32, v2i16, v2i16, v32f32]>; 2233def VOP_V4I32_I32_I32_V4I32 : VOPProfile <[v4i32, i32, i32, v4i32]>; 2234def VOP_V16I32_I32_I32_V16I32 : VOPProfile <[v16i32, i32, i32, v16i32]>; 2235def VOP_V32I32_I32_I32_V32I32 : VOPProfile <[v32i32, i32, i32, v32i32]>; 2236 2237class Commutable_REV <string revOp, bit isOrig> { 2238 string RevOp = revOp; 2239 bit IsOrig = isOrig; 2240} 2241 2242class AtomicNoRet <string noRetOp, bit isRet> { 2243 string NoRetOp = noRetOp; 2244 bit IsRet = isRet; 2245} 2246 2247//===----------------------------------------------------------------------===// 2248// Interpolation opcodes 2249//===----------------------------------------------------------------------===// 2250 2251class VINTRPDstOperand <RegisterClass rc> : RegisterOperand <rc, "printVINTRPDst">; 2252 2253class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> : 2254 VINTRPCommon <outs, ins, "", pattern>, 2255 SIMCInstr<opName, SIEncodingFamily.NONE> { 2256 let isPseudo = 1; 2257 let isCodeGenOnly = 1; 2258} 2259 2260// FIXME-GFX10: WIP. 2261class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins, 2262 string asm, int encodingFamily> : 2263 VINTRPCommon <outs, ins, asm, []>, 2264 VINTRPe <op>, 2265 SIMCInstr<opName, encodingFamily> { 2266} 2267 2268class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins, 2269 string asm> : 2270 VINTRPCommon <outs, ins, asm, []>, 2271 VINTRPe_vi <op>, 2272 SIMCInstr<opName, SIEncodingFamily.VI> { 2273 let AssemblerPredicate = VIAssemblerPredicate; 2274 let DecoderNamespace = "GFX8"; 2275} 2276 2277// FIXME-GFX10: WIP. 2278multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm, 2279 list<dag> pattern = []> { 2280 def "" : VINTRP_Pseudo <NAME, outs, ins, pattern>; 2281 2282 let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in { 2283 def _si : VINTRP_Real_si <op, NAME, outs, ins, asm, SIEncodingFamily.SI>; 2284 } // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" 2285 2286 def _vi : VINTRP_Real_vi <op, NAME, outs, ins, asm>; 2287 2288 let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in { 2289 def _gfx10 : VINTRP_Real_si<op, NAME, outs, ins, asm, SIEncodingFamily.GFX10>; 2290 } // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" 2291} 2292//===----------------------------------------------------------------------===// 2293// Vector instruction mappings 2294//===----------------------------------------------------------------------===// 2295 2296// Maps an opcode in e32 form to its e64 equivalent 2297def getVOPe64 : InstrMapping { 2298 let FilterClass = "VOP"; 2299 let RowFields = ["OpName"]; 2300 let ColFields = ["Size", "VOP3"]; 2301 let KeyCol = ["4", "0"]; 2302 let ValueCols = [["8", "1"]]; 2303} 2304 2305// Maps an opcode in e64 form to its e32 equivalent 2306def getVOPe32 : InstrMapping { 2307 let FilterClass = "VOP"; 2308 let RowFields = ["OpName"]; 2309 let ColFields = ["Size", "VOP3"]; 2310 let KeyCol = ["8", "1"]; 2311 let ValueCols = [["4", "0"]]; 2312} 2313 2314// Maps ordinary instructions to their SDWA counterparts 2315def getSDWAOp : InstrMapping { 2316 let FilterClass = "VOP"; 2317 let RowFields = ["OpName"]; 2318 let ColFields = ["AsmVariantName"]; 2319 let KeyCol = ["Default"]; 2320 let ValueCols = [["SDWA"]]; 2321} 2322 2323// Maps SDWA instructions to their ordinary counterparts 2324def getBasicFromSDWAOp : InstrMapping { 2325 let FilterClass = "VOP"; 2326 let RowFields = ["OpName"]; 2327 let ColFields = ["AsmVariantName"]; 2328 let KeyCol = ["SDWA"]; 2329 let ValueCols = [["Default"]]; 2330} 2331 2332// Maps ordinary instructions to their DPP counterparts 2333def getDPPOp32 : InstrMapping { 2334 let FilterClass = "VOP"; 2335 let RowFields = ["OpName"]; 2336 let ColFields = ["AsmVariantName"]; 2337 let KeyCol = ["Default"]; 2338 let ValueCols = [["DPP"]]; 2339} 2340 2341// Maps an commuted opcode to its original version 2342def getCommuteOrig : InstrMapping { 2343 let FilterClass = "Commutable_REV"; 2344 let RowFields = ["RevOp"]; 2345 let ColFields = ["IsOrig"]; 2346 let KeyCol = ["0"]; 2347 let ValueCols = [["1"]]; 2348} 2349 2350// Maps an original opcode to its commuted version 2351def getCommuteRev : InstrMapping { 2352 let FilterClass = "Commutable_REV"; 2353 let RowFields = ["RevOp"]; 2354 let ColFields = ["IsOrig"]; 2355 let KeyCol = ["1"]; 2356 let ValueCols = [["0"]]; 2357} 2358 2359def getMCOpcodeGen : InstrMapping { 2360 let FilterClass = "SIMCInstr"; 2361 let RowFields = ["PseudoInstr"]; 2362 let ColFields = ["Subtarget"]; 2363 let KeyCol = [!cast<string>(SIEncodingFamily.NONE)]; 2364 let ValueCols = [[!cast<string>(SIEncodingFamily.SI)], 2365 [!cast<string>(SIEncodingFamily.VI)], 2366 [!cast<string>(SIEncodingFamily.SDWA)], 2367 [!cast<string>(SIEncodingFamily.SDWA9)], 2368 // GFX80 encoding is added to work around a multiple matching 2369 // issue for buffer instructions with unpacked d16 data. This 2370 // does not actually change the encoding, and thus may be 2371 // removed later. 2372 [!cast<string>(SIEncodingFamily.GFX80)], 2373 [!cast<string>(SIEncodingFamily.GFX9)], 2374 [!cast<string>(SIEncodingFamily.GFX10)], 2375 [!cast<string>(SIEncodingFamily.SDWA10)]]; 2376} 2377 2378// Get equivalent SOPK instruction. 2379def getSOPKOp : InstrMapping { 2380 let FilterClass = "SOPKInstTable"; 2381 let RowFields = ["BaseCmpOp"]; 2382 let ColFields = ["IsSOPK"]; 2383 let KeyCol = ["0"]; 2384 let ValueCols = [["1"]]; 2385} 2386 2387def getAddr64Inst : InstrMapping { 2388 let FilterClass = "MUBUFAddr64Table"; 2389 let RowFields = ["OpName"]; 2390 let ColFields = ["IsAddr64"]; 2391 let KeyCol = ["0"]; 2392 let ValueCols = [["1"]]; 2393} 2394 2395def getIfAddr64Inst : InstrMapping { 2396 let FilterClass = "MUBUFAddr64Table"; 2397 let RowFields = ["OpName"]; 2398 let ColFields = ["IsAddr64"]; 2399 let KeyCol = ["1"]; 2400 let ValueCols = [["1"]]; 2401} 2402 2403def getMUBUFNoLdsInst : InstrMapping { 2404 let FilterClass = "MUBUFLdsTable"; 2405 let RowFields = ["OpName"]; 2406 let ColFields = ["IsLds"]; 2407 let KeyCol = ["1"]; 2408 let ValueCols = [["0"]]; 2409} 2410 2411// Maps an atomic opcode to its version with a return value. 2412def getAtomicRetOp : InstrMapping { 2413 let FilterClass = "AtomicNoRet"; 2414 let RowFields = ["NoRetOp"]; 2415 let ColFields = ["IsRet"]; 2416 let KeyCol = ["0"]; 2417 let ValueCols = [["1"]]; 2418} 2419 2420// Maps an atomic opcode to its returnless version. 2421def getAtomicNoRetOp : InstrMapping { 2422 let FilterClass = "AtomicNoRet"; 2423 let RowFields = ["NoRetOp"]; 2424 let ColFields = ["IsRet"]; 2425 let KeyCol = ["1"]; 2426 let ValueCols = [["0"]]; 2427} 2428 2429// Maps a GLOBAL to its SADDR form. 2430def getGlobalSaddrOp : InstrMapping { 2431 let FilterClass = "GlobalSaddrTable"; 2432 let RowFields = ["SaddrOp"]; 2433 let ColFields = ["IsSaddr"]; 2434 let KeyCol = ["0"]; 2435 let ValueCols = [["1"]]; 2436} 2437 2438// Maps a v_cmpx opcode with sdst to opcode without sdst. 2439def getVCMPXNoSDstOp : InstrMapping { 2440 let FilterClass = "VCMPXNoSDstTable"; 2441 let RowFields = ["NoSDstOp"]; 2442 let ColFields = ["HasSDst"]; 2443 let KeyCol = ["1"]; 2444 let ValueCols = [["0"]]; 2445} 2446 2447// Maps a SOPP to a SOPP with S_NOP 2448def getSOPPWithRelaxation : InstrMapping { 2449 let FilterClass = "SOPPRelaxTable"; 2450 let RowFields = ["KeyName"]; 2451 let ColFields = ["IsRelaxed"]; 2452 let KeyCol = ["0"]; 2453 let ValueCols = [["1"]]; 2454} 2455 2456// Maps flat scratch opcodes by addressing modes 2457def getFlatScratchInstSTfromSS : InstrMapping { 2458 let FilterClass = "FlatScratchInst"; 2459 let RowFields = ["SVOp"]; 2460 let ColFields = ["Mode"]; 2461 let KeyCol = ["SS"]; 2462 let ValueCols = [["ST"]]; 2463} 2464 2465def getFlatScratchInstSSfromSV : InstrMapping { 2466 let FilterClass = "FlatScratchInst"; 2467 let RowFields = ["SVOp"]; 2468 let ColFields = ["Mode"]; 2469 let KeyCol = ["SV"]; 2470 let ValueCols = [["SS"]]; 2471} 2472 2473include "SIInstructions.td" 2474 2475include "DSInstructions.td" 2476include "MIMGInstructions.td" 2477