xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDKernelCodeT.h (revision 0b57cec536236d46e3dba9bd041533462f33dbb7)
1*0b57cec5SDimitry Andric //===-- AMDGPUKernelCodeT.h - Print AMDGPU assembly code ---------*- C++ -*-===//
2*0b57cec5SDimitry Andric //
3*0b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*0b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5*0b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*0b57cec5SDimitry Andric //
7*0b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
8*0b57cec5SDimitry Andric /// \file AMDKernelCodeT.h
9*0b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
10*0b57cec5SDimitry Andric 
11*0b57cec5SDimitry Andric #ifndef AMDKERNELCODET_H
12*0b57cec5SDimitry Andric #define AMDKERNELCODET_H
13*0b57cec5SDimitry Andric 
14*0b57cec5SDimitry Andric #include "llvm/MC/SubtargetFeature.h"
15*0b57cec5SDimitry Andric 
16*0b57cec5SDimitry Andric #include <cstddef>
17*0b57cec5SDimitry Andric #include <cstdint>
18*0b57cec5SDimitry Andric 
19*0b57cec5SDimitry Andric #include "llvm/Support/Debug.h"
20*0b57cec5SDimitry Andric //---------------------------------------------------------------------------//
21*0b57cec5SDimitry Andric // AMD Kernel Code, and its dependencies                                     //
22*0b57cec5SDimitry Andric //---------------------------------------------------------------------------//
23*0b57cec5SDimitry Andric 
// Fixed-width scalar typedefs for fields of the HSA/AMD kernel code
// structures. The numeric suffix in each name records the field's bit
// width so the ABI layout is explicit.
typedef uint8_t hsa_powertwo8_t;
typedef uint32_t hsa_ext_code_kind_t;
typedef uint8_t hsa_ext_brig_profile8_t;
typedef uint8_t hsa_ext_brig_machine_model8_t;
typedef uint64_t hsa_ext_control_directive_present64_t;
typedef uint16_t hsa_ext_exception_kind16_t;
typedef uint32_t hsa_ext_code_kind32_t;

/// Three-dimensional extent (x, y, z); used below for grid and
/// work-group size control directives.
typedef struct hsa_dim3_s {
  uint32_t x;
  uint32_t y;
  uint32_t z;
} hsa_dim3_t;
37*0b57cec5SDimitry Andric 
/// The version of the amd_*_code_t struct. Minor versions must be
/// backward compatible.
typedef uint32_t amd_code_version32_t;
enum amd_code_version_t {
  AMD_CODE_VERSION_MAJOR = 0, // bumped only for incompatible layout changes
  AMD_CODE_VERSION_MINOR = 1  // bumped for backward-compatible additions
};
45*0b57cec5SDimitry Andric 
// Sets val bits for specified mask in specified dst packed instance.
// Wrapped in do/while(0) so the two statements behave as a single
// statement inside unbraced if/else bodies; all arguments are
// parenthesized to protect against operator-precedence surprises.
// Clearing with ~(mask) is equivalent to the historical
// (~(1 << mask##_SHIFT) & ~mask) because bit mask##_SHIFT lies inside mask.
#define AMD_HSA_BITS_SET(dst, mask, val)                                       \
  do {                                                                         \
    (dst) &= ~(mask);                                                          \
    (dst) |= (((val) << mask ## _SHIFT) & (mask));                             \
  } while (0)
50*0b57cec5SDimitry Andric 
// Gets bits for specified mask from specified src packed instance.
// Arguments are fully parenthesized, and the macro ends cleanly (no
// trailing line continuation), so it cannot absorb the line that
// follows its definition.
#define AMD_HSA_BITS_GET(src, mask)                                            \
  (((src) & (mask)) >> mask ## _SHIFT)
54*0b57cec5SDimitry Andric 
/// The values used to define the number of bytes to use for the
/// swizzle element size. Encoded so that value v selects an element of
/// 2^(v+1) bytes.
enum amd_element_byte_size_t {
  AMD_ELEMENT_2_BYTES = 0,
  AMD_ELEMENT_4_BYTES = 1,
  AMD_ELEMENT_8_BYTES = 2,
  AMD_ELEMENT_16_BYTES = 3
};
63*0b57cec5SDimitry Andric 
/// Shader program settings for CS. Contains COMPUTE_PGM_RSRC1 and
/// COMPUTE_PGM_RSRC2 registers.
typedef uint64_t amd_compute_pgm_resource_register64_t;

/// Every amd_*_code_t has the following properties, which are composed of
/// a number of bit fields. Every bit field has a mask (AMD_CODE_PROPERTY_*),
/// bit width (AMD_CODE_PROPERTY_*_WIDTH), and bit shift amount
/// (AMD_CODE_PROPERTY_*_SHIFT) for convenient access. Unused bits must be 0.
///
/// (Note that bit fields cannot be used as their layout is
/// implementation defined in the C standard and so cannot be used to
/// specify an ABI)
typedef uint32_t amd_code_property32_t;
enum amd_code_property_mask_t {

  /// Enable the setup of the SGPR user data registers
  /// (AMD_CODE_PROPERTY_ENABLE_SGPR_*), see documentation of amd_kernel_code_t
  /// for initial register state.
  ///
  /// The total number of SGPR user data registers requested must not
  /// exceed 16. Any requests beyond 16 will be ignored.
  ///
  /// Used to set COMPUTE_PGM_RSRC2.USER_SGPR (set to total count of
  /// SGPR user data registers enabled up to 16).

  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT = 0,
  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT = 2,
  AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT = 3,
  AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT = 4,
  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT = 5,
  AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT = 6,
  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT = 7,
  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT = 8,
  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT = 9,
  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT,

  AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_SHIFT = 10,
  AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32 = ((1 << AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_SHIFT,

  /// Reserved. Must be 0.
  AMD_CODE_PROPERTY_RESERVED1_SHIFT = 11,
  AMD_CODE_PROPERTY_RESERVED1_WIDTH = 5,
  AMD_CODE_PROPERTY_RESERVED1 = ((1 << AMD_CODE_PROPERTY_RESERVED1_WIDTH) - 1) << AMD_CODE_PROPERTY_RESERVED1_SHIFT,

  /// Control wave ID base counter for GDS ordered-append. Used to set
  /// COMPUTE_DISPATCH_INITIATOR.ORDERED_APPEND_ENBL. (Not sure if
  /// ORDERED_APPEND_MODE also needs to be settable)
  AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT = 16,
  AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH = 1,
  AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS = ((1 << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT,

  /// The interleave (swizzle) element size in bytes required by the
  /// code for private memory. This must be 2, 4, 8 or 16. This value
  /// is provided to the finalizer when it is invoked and is recorded
  /// here. The hardware will interleave the memory requests of each
  /// lane of a wavefront by this element size to ensure each
  /// work-item gets a distinct memory location. Therefore, the
  /// finalizer ensures that all load and store operations done to
  /// private memory do not exceed this size. For example, if the
  /// element size is 4 (32-bits or dword) and a 64-bit value must be
  /// loaded, the finalizer will generate two 32-bit loads. This
  /// ensures that the interleaving will get the work-item
  /// specific dword for both halves of the 64-bit value. If it just
  /// did a 64-bit load then it would get one dword which belonged to
  /// its own work-item, but the second dword would belong to the
  /// adjacent lane work-item since the interleaving is in dwords.
  ///
  /// The value used must match the value that the runtime configures
  /// the GPU flat scratch (SH_STATIC_MEM_CONFIG.ELEMENT_SIZE). This
  /// is generally DWORD.
  ///
  /// Use values from the amd_element_byte_size_t enum.
  AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT = 17,
  AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH = 2,
  AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE = ((1 << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH) - 1) << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT,

  /// Are global memory addresses 64 bits. Must match
  /// amd_kernel_code_t.hsail_machine_model ==
  /// HSA_MACHINE_LARGE. Must also match
  /// SH_MEM_CONFIG.PTR32 (GFX6 (SI)/GFX7 (CI)),
  /// SH_MEM_CONFIG.ADDRESS_MODE (GFX8 (VI)+).
  AMD_CODE_PROPERTY_IS_PTR64_SHIFT = 19,
  AMD_CODE_PROPERTY_IS_PTR64_WIDTH = 1,
  AMD_CODE_PROPERTY_IS_PTR64 = ((1 << AMD_CODE_PROPERTY_IS_PTR64_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_PTR64_SHIFT,

  /// Indicate if the generated ISA is using a dynamically sized call
  /// stack. This can happen if calls are implemented using a call
  /// stack and recursion, alloca or calls to indirect functions are
  /// present. In these cases the Finalizer cannot compute the total
  /// private segment size at compile time. In this case the
  /// workitem_private_segment_byte_size only specifies the statically
  /// known private segment size, and additional space must be added
  /// for the call stack.
  AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT = 20,
  AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH = 1,
  AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK = ((1 << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT,

  /// Indicate if code generated has support for debugging.
  AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT = 21,
  AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH = 1,
  AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT,

  AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT = 22,
  AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_WIDTH = 1,
  AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT,

  /// Reserved. Must be 0.
  AMD_CODE_PROPERTY_RESERVED2_SHIFT = 23,
  AMD_CODE_PROPERTY_RESERVED2_WIDTH = 9,
  AMD_CODE_PROPERTY_RESERVED2 = ((1 << AMD_CODE_PROPERTY_RESERVED2_WIDTH) - 1) << AMD_CODE_PROPERTY_RESERVED2_SHIFT
};
203*0b57cec5SDimitry Andric 
/// The hsa_ext_control_directives_t specifies the values for the HSAIL
/// control directives. These control how the finalizer generates code. This
/// struct is used both as an argument to hsaFinalizeKernel to specify values for
/// the control directives, and is used in HsaKernelCode to record the values of
/// the control directives that the finalizer used when generating the code which
/// either came from the finalizer argument or explicit HSAIL control
/// directives. See the definition of the control directives in HSA Programmer's
/// Reference Manual which also defines how the values specified as finalizer
/// arguments have to agree with the control directives in the HSAIL code.
typedef struct hsa_ext_control_directives_s {
  /// This is a bit set indicating which control directives have been
  /// specified. If the value is 0 then there are no control directives specified
  /// and the rest of the fields can be ignored. The bits are accessed using the
  /// hsa_ext_control_directives_present_mask_t. Any control directive that is not
  /// enabled in this bit set must have the value of all 0s.
  hsa_ext_control_directive_present64_t enabled_control_directives;

  /// If enableBreakExceptions is not enabled then must be 0, otherwise must be
  /// non-0 and specifies the set of HSAIL exceptions that must have the BREAK
  /// policy enabled. If this set is not empty then the generated code may have
  /// lower performance than if the set is empty. If the kernel being finalized
  /// has any enablebreakexceptions control directives, then the values specified
  /// by this argument are unioned with the values in these control
  /// directives. If any of the functions the kernel calls have an
  /// enablebreakexceptions control directive, then they must be equal or a
  /// subset of, this union.
  hsa_ext_exception_kind16_t enable_break_exceptions;

  /// If enableDetectExceptions is not enabled then must be 0, otherwise must be
  /// non-0 and specifies the set of HSAIL exceptions that must have the DETECT
  /// policy enabled. If this set is not empty then the generated code may have
  /// lower performance than if the set is empty. However, an implementation
  /// should endeavour to make the performance impact small. If the kernel being
  /// finalized has any enabledetectexceptions control directives, then the
  /// values specified by this argument are unioned with the values in these
  /// control directives. If any of the functions the kernel calls have an
  /// enabledetectexceptions control directive, then they must be equal or a
  /// subset of, this union.
  hsa_ext_exception_kind16_t enable_detect_exceptions;

  /// If maxDynamicGroupSize is not enabled then must be 0, and any amount of
  /// dynamic group segment can be allocated for a dispatch, otherwise the value
  /// specifies the maximum number of bytes of dynamic group segment that can be
  /// allocated for a dispatch. If the kernel being finalized has any
  /// maxdynamicsize control directives, then the values must be the same, and
  /// must be the same as this argument if it is enabled. This value can be used
  /// by the finalizer to determine the maximum number of bytes of group memory
  /// used by each work-group by adding this value to the group memory required
  /// for all group segment variables used by the kernel and all functions it
  /// calls, and group memory used to implement other HSAIL features such as
  /// fbarriers and the detect exception operations. This can allow the finalizer
  /// to determine the expected number of work-groups that can be executed by a
  /// compute unit and allow more resources to be allocated to the work-items if
  /// it is known that fewer work-groups can be executed due to group memory
  /// limitations.
  uint32_t max_dynamic_group_size;

  /// If maxFlatGridSize is not enabled then must be 0, otherwise must be greater
  /// than 0. See HSA Programmer's Reference Manual description of
  /// maxflatgridsize control directive.
  uint32_t max_flat_grid_size;

  /// If maxFlatWorkgroupSize is not enabled then must be 0, otherwise must be
  /// greater than 0. See HSA Programmer's Reference Manual description of
  /// maxflatworkgroupsize control directive.
  uint32_t max_flat_workgroup_size;

  /// If requestedWorkgroupsPerCu is not enabled then must be 0, and the
  /// finalizer is free to generate ISA that may result in any number of
  /// work-groups executing on a single compute unit. Otherwise, the finalizer
  /// should attempt to generate ISA that will allow the specified number of
  /// work-groups to execute on a single compute unit. This is only a hint and
  /// can be ignored by the finalizer. If the kernel being finalized, or any of
  /// the functions it calls, has a requested control directive, then the values
  /// must be the same. This can be used to determine the number of resources
  /// that should be allocated to a single work-group and work-item. For example,
  /// a low value may allow more resources to be allocated, resulting in higher
  /// per work-item performance, as it is known there will never be more than the
  /// specified number of work-groups actually executing on the compute
  /// unit. Conversely, a high value may allocate fewer resources, resulting in
  /// lower per work-item performance, which is offset by the fact it allows more
  /// work-groups to actually execute on the compute unit.
  uint32_t requested_workgroups_per_cu;

  /// If not enabled then all elements for Dim3 must be 0, otherwise every
  /// element must be greater than 0. See HSA Programmer's Reference Manual
  /// description of requiredgridsize control directive.
  hsa_dim3_t required_grid_size;

  /// If requiredWorkgroupSize is not enabled then all elements for Dim3 must be
  /// 0, and the produced code can be dispatched with any legal work-group range
  /// consistent with the dispatch dimensions. Otherwise, the code produced must
  /// always be dispatched with the specified work-group range. No element of the
  /// specified range must be 0. It must be consistent with required_dimensions
  /// and max_flat_workgroup_size. If the kernel being finalized, or any of the
  /// functions it calls, has a requiredworkgroupsize control directive, then the
  /// values must be the same. Specifying a value can allow the finalizer to
  /// optimize work-group id operations, and if the number of work-items in the
  /// work-group is less than the WAVESIZE then barrier operations can be
  /// optimized to just a memory fence.
  hsa_dim3_t required_workgroup_size;

  /// If requiredDim is not enabled then must be 0 and the produced kernel code
  /// can be dispatched with 1, 2 or 3 dimensions. If enabled then the value is
  /// 1..3 and the code produced must only be dispatched with a dimension that
  /// matches. Other values are illegal. If the kernel being finalized, or any of
  /// the functions it calls, has a requireddimsize control directive, then the
  /// values must be the same. This can be used to optimize the code generated to
  /// compute the absolute and flat work-group and work-item id, and the dim
  /// HSAIL operations.
  uint8_t required_dim;

  /// Reserved. Must be 0. Pads the struct to its fixed ABI size.
  uint8_t reserved[75];
} hsa_ext_control_directives_t;
319*0b57cec5SDimitry Andric 
320*0b57cec5SDimitry Andric /// AMD Kernel Code Object (amd_kernel_code_t). GPU CP uses the AMD Kernel
321*0b57cec5SDimitry Andric /// Code Object to set up the hardware to execute the kernel dispatch.
322*0b57cec5SDimitry Andric ///
323*0b57cec5SDimitry Andric /// Initial Kernel Register State.
324*0b57cec5SDimitry Andric ///
325*0b57cec5SDimitry Andric /// Initial kernel register state will be set up by CP/SPI prior to the start
326*0b57cec5SDimitry Andric /// of execution of every wavefront. This is limited by the constraints of the
327*0b57cec5SDimitry Andric /// current hardware.
328*0b57cec5SDimitry Andric ///
329*0b57cec5SDimitry Andric /// The order of the SGPR registers is defined, but the Finalizer can specify
330*0b57cec5SDimitry Andric /// which ones are actually setup in the amd_kernel_code_t object using the
331*0b57cec5SDimitry Andric /// enable_sgpr_* bit fields. The register numbers used for enabled registers
332*0b57cec5SDimitry Andric /// are dense starting at SGPR0: the first enabled register is SGPR0, the next
333*0b57cec5SDimitry Andric /// enabled register is SGPR1 etc.; disabled registers do not have an SGPR
334*0b57cec5SDimitry Andric /// number.
335*0b57cec5SDimitry Andric ///
336*0b57cec5SDimitry Andric /// The initial SGPRs comprise up to 16 User SRGPs that are set up by CP and
337*0b57cec5SDimitry Andric /// apply to all waves of the grid. It is possible to specify more than 16 User
338*0b57cec5SDimitry Andric /// SGPRs using the enable_sgpr_* bit fields, in which case only the first 16
339*0b57cec5SDimitry Andric /// are actually initialized. These are then immediately followed by the System
340*0b57cec5SDimitry Andric /// SGPRs that are set up by ADC/SPI and can have different values for each wave
341*0b57cec5SDimitry Andric /// of the grid dispatch.
342*0b57cec5SDimitry Andric ///
343*0b57cec5SDimitry Andric /// SGPR register initial state is defined as follows:
344*0b57cec5SDimitry Andric ///
345*0b57cec5SDimitry Andric /// Private Segment Buffer (enable_sgpr_private_segment_buffer):
346*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 4. V# that can be used, together with
347*0b57cec5SDimitry Andric ///   Scratch Wave Offset as an offset, to access the Private/Spill/Arg
348*0b57cec5SDimitry Andric ///   segments using a segment address. It must be set as follows:
349*0b57cec5SDimitry Andric ///     - Base address: of the scratch memory area used by the dispatch. It
350*0b57cec5SDimitry Andric ///       does not include the scratch wave offset. It will be the per process
351*0b57cec5SDimitry Andric ///       SH_HIDDEN_PRIVATE_BASE_VMID plus any offset from this dispatch (for
352*0b57cec5SDimitry Andric ///       example there may be a per pipe offset, or per AQL Queue offset).
353*0b57cec5SDimitry Andric ///     - Stride + data_format: Element Size * Index Stride (???)
354*0b57cec5SDimitry Andric ///     - Cache swizzle: ???
355*0b57cec5SDimitry Andric ///     - Swizzle enable: SH_STATIC_MEM_CONFIG.SWIZZLE_ENABLE (must be 1 for
356*0b57cec5SDimitry Andric ///       scratch)
357*0b57cec5SDimitry Andric ///     - Num records: Flat Scratch Work Item Size / Element Size (???)
358*0b57cec5SDimitry Andric ///     - Dst_sel_*: ???
359*0b57cec5SDimitry Andric ///     - Num_format: ???
360*0b57cec5SDimitry Andric ///     - Element_size: SH_STATIC_MEM_CONFIG.ELEMENT_SIZE (will be DWORD, must
361*0b57cec5SDimitry Andric ///       agree with amd_kernel_code_t.privateElementSize)
362*0b57cec5SDimitry Andric ///     - Index_stride: SH_STATIC_MEM_CONFIG.INDEX_STRIDE (will be 64 as must
363*0b57cec5SDimitry Andric ///       be number of wavefront lanes for scratch, must agree with
364*0b57cec5SDimitry Andric ///       amd_kernel_code_t.wavefrontSize)
365*0b57cec5SDimitry Andric ///     - Add tid enable: 1
366*0b57cec5SDimitry Andric ///     - ATC: from SH_MEM_CONFIG.PRIVATE_ATC,
367*0b57cec5SDimitry Andric ///     - Hash_enable: ???
368*0b57cec5SDimitry Andric ///     - Heap: ???
369*0b57cec5SDimitry Andric ///     - Mtype: from SH_STATIC_MEM_CONFIG.PRIVATE_MTYPE
370*0b57cec5SDimitry Andric ///     - Type: 0 (a buffer) (???)
371*0b57cec5SDimitry Andric ///
372*0b57cec5SDimitry Andric /// Dispatch Ptr (enable_sgpr_dispatch_ptr):
373*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 2. 64 bit address of AQL dispatch packet
374*0b57cec5SDimitry Andric ///   for kernel actually executing.
375*0b57cec5SDimitry Andric ///
376*0b57cec5SDimitry Andric /// Queue Ptr (enable_sgpr_queue_ptr):
377*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 2. 64 bit address of AmdQueue object for
378*0b57cec5SDimitry Andric ///   AQL queue on which the dispatch packet was queued.
379*0b57cec5SDimitry Andric ///
380*0b57cec5SDimitry Andric /// Kernarg Segment Ptr (enable_sgpr_kernarg_segment_ptr):
381*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 2. 64 bit address of Kernarg segment. This
382*0b57cec5SDimitry Andric ///   is directly copied from the kernargPtr in the dispatch packet. Having CP
383*0b57cec5SDimitry Andric ///   load it once avoids loading it at the beginning of every wavefront.
384*0b57cec5SDimitry Andric ///
385*0b57cec5SDimitry Andric /// Dispatch Id (enable_sgpr_dispatch_id):
386*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 2. 64 bit Dispatch ID of the dispatch
387*0b57cec5SDimitry Andric ///   packet being executed.
388*0b57cec5SDimitry Andric ///
389*0b57cec5SDimitry Andric /// Flat Scratch Init (enable_sgpr_flat_scratch_init):
390*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 2. This is 2 SGPRs.
391*0b57cec5SDimitry Andric ///
392*0b57cec5SDimitry Andric ///   For CI/VI:
393*0b57cec5SDimitry Andric ///     The first SGPR is a 32 bit byte offset from SH_MEM_HIDDEN_PRIVATE_BASE
394*0b57cec5SDimitry Andric ///     to base of memory for scratch for this dispatch. This is the same offset
395*0b57cec5SDimitry Andric ///     used in computing the Scratch Segment Buffer base address. The value of
396*0b57cec5SDimitry Andric ///     Scratch Wave Offset must be added by the kernel code and moved to
397*0b57cec5SDimitry Andric ///     SGPRn-4 for use as the FLAT SCRATCH BASE in flat memory instructions.
398*0b57cec5SDimitry Andric ///
399*0b57cec5SDimitry Andric ///     The second SGPR is 32 bit byte size of a single work-item's scratch
400*0b57cec5SDimitry Andric ///     memory usage. This is directly loaded from the dispatch packet Private
401*0b57cec5SDimitry Andric ///     Segment Byte Size and rounded up to a multiple of DWORD.
402*0b57cec5SDimitry Andric ///
403*0b57cec5SDimitry Andric ///     \todo [Does CP need to round this to >4 byte alignment?]
404*0b57cec5SDimitry Andric ///
405*0b57cec5SDimitry Andric ///     The kernel code must move to SGPRn-3 for use as the FLAT SCRATCH SIZE in
406*0b57cec5SDimitry Andric ///     flat memory instructions. Having CP load it once avoids loading it at
407*0b57cec5SDimitry Andric ///     the beginning of every wavefront.
408*0b57cec5SDimitry Andric ///
409*0b57cec5SDimitry Andric ///   For PI:
///     This is the 64 bit base address of the scratch backing memory
///     allocated by CP for this dispatch.
412*0b57cec5SDimitry Andric ///
413*0b57cec5SDimitry Andric /// Private Segment Size (enable_sgpr_private_segment_size):
414*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 1. The 32 bit byte size of a single
415*0b57cec5SDimitry Andric ///   work-item's scratch memory allocation. This is the value from the dispatch
416*0b57cec5SDimitry Andric ///   packet. Private Segment Byte Size rounded up by CP to a multiple of DWORD.
417*0b57cec5SDimitry Andric ///
418*0b57cec5SDimitry Andric ///   \todo [Does CP need to round this to >4 byte alignment?]
419*0b57cec5SDimitry Andric ///
420*0b57cec5SDimitry Andric ///   Having CP load it once avoids loading it at the beginning of every
421*0b57cec5SDimitry Andric ///   wavefront.
422*0b57cec5SDimitry Andric ///
///   \todo [This will not be used for CI/VI since it is the same value as
///   the second SGPR of Flat Scratch Init. However, it is needed for PI which
///   changes the meaning of Flat Scratch Init.]
426*0b57cec5SDimitry Andric ///
427*0b57cec5SDimitry Andric /// Grid Work-Group Count X (enable_sgpr_grid_workgroup_count_x):
428*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 1. 32 bit count of the number of
429*0b57cec5SDimitry Andric ///   work-groups in the X dimension for the grid being executed. Computed from
430*0b57cec5SDimitry Andric ///   the fields in the HsaDispatchPacket as
431*0b57cec5SDimitry Andric ///   ((gridSize.x+workgroupSize.x-1)/workgroupSize.x).
432*0b57cec5SDimitry Andric ///
433*0b57cec5SDimitry Andric /// Grid Work-Group Count Y (enable_sgpr_grid_workgroup_count_y):
434*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 1. 32 bit count of the number of
435*0b57cec5SDimitry Andric ///   work-groups in the Y dimension for the grid being executed. Computed from
436*0b57cec5SDimitry Andric ///   the fields in the HsaDispatchPacket as
437*0b57cec5SDimitry Andric ///   ((gridSize.y+workgroupSize.y-1)/workgroupSize.y).
438*0b57cec5SDimitry Andric ///
439*0b57cec5SDimitry Andric ///   Only initialized if <16 previous SGPRs initialized.
440*0b57cec5SDimitry Andric ///
441*0b57cec5SDimitry Andric /// Grid Work-Group Count Z (enable_sgpr_grid_workgroup_count_z):
442*0b57cec5SDimitry Andric ///   Number of User SGPR registers: 1. 32 bit count of the number of
443*0b57cec5SDimitry Andric ///   work-groups in the Z dimension for the grid being executed. Computed
444*0b57cec5SDimitry Andric ///   from the fields in the HsaDispatchPacket as
445*0b57cec5SDimitry Andric ///   ((gridSize.z+workgroupSize.z-1)/workgroupSize.z).
446*0b57cec5SDimitry Andric ///
447*0b57cec5SDimitry Andric ///   Only initialized if <16 previous SGPRs initialized.
448*0b57cec5SDimitry Andric ///
449*0b57cec5SDimitry Andric /// Work-Group Id X (enable_sgpr_workgroup_id_x):
450*0b57cec5SDimitry Andric ///   Number of System SGPR registers: 1. 32 bit work group id in X dimension
451*0b57cec5SDimitry Andric ///   of grid for wavefront. Always present.
452*0b57cec5SDimitry Andric ///
453*0b57cec5SDimitry Andric /// Work-Group Id Y (enable_sgpr_workgroup_id_y):
454*0b57cec5SDimitry Andric ///   Number of System SGPR registers: 1. 32 bit work group id in Y dimension
455*0b57cec5SDimitry Andric ///   of grid for wavefront.
456*0b57cec5SDimitry Andric ///
457*0b57cec5SDimitry Andric /// Work-Group Id Z (enable_sgpr_workgroup_id_z):
458*0b57cec5SDimitry Andric ///   Number of System SGPR registers: 1. 32 bit work group id in Z dimension
459*0b57cec5SDimitry Andric ///   of grid for wavefront. If present then Work-group Id Y will also be
460*0b57cec5SDimitry Andric ///   present
461*0b57cec5SDimitry Andric ///
462*0b57cec5SDimitry Andric /// Work-Group Info (enable_sgpr_workgroup_info):
463*0b57cec5SDimitry Andric ///   Number of System SGPR registers: 1. {first_wave, 14'b0000,
464*0b57cec5SDimitry Andric ///   ordered_append_term[10:0], threadgroup_size_in_waves[5:0]}
465*0b57cec5SDimitry Andric ///
466*0b57cec5SDimitry Andric /// Private Segment Wave Byte Offset
467*0b57cec5SDimitry Andric /// (enable_sgpr_private_segment_wave_byte_offset):
468*0b57cec5SDimitry Andric ///   Number of System SGPR registers: 1. 32 bit byte offset from base of
469*0b57cec5SDimitry Andric ///   dispatch scratch base. Must be used as an offset with Private/Spill/Arg
470*0b57cec5SDimitry Andric ///   segment address when using Scratch Segment Buffer. It must be added to
471*0b57cec5SDimitry Andric ///   Flat Scratch Offset if setting up FLAT SCRATCH for flat addressing.
472*0b57cec5SDimitry Andric ///
473*0b57cec5SDimitry Andric ///
474*0b57cec5SDimitry Andric /// The order of the VGPR registers is defined, but the Finalizer can specify
475*0b57cec5SDimitry Andric /// which ones are actually setup in the amd_kernel_code_t object using the
476*0b57cec5SDimitry Andric /// enableVgpr*  bit fields. The register numbers used for enabled registers
477*0b57cec5SDimitry Andric /// are dense starting at VGPR0: the first enabled register is VGPR0, the next
478*0b57cec5SDimitry Andric /// enabled register is VGPR1 etc.; disabled registers do not have an VGPR
479*0b57cec5SDimitry Andric /// number.
480*0b57cec5SDimitry Andric ///
481*0b57cec5SDimitry Andric /// VGPR register initial state is defined as follows:
482*0b57cec5SDimitry Andric ///
483*0b57cec5SDimitry Andric /// Work-Item Id X (always initialized):
484*0b57cec5SDimitry Andric ///   Number of registers: 1. 32 bit work item id in X dimension of work-group
485*0b57cec5SDimitry Andric ///   for wavefront lane.
486*0b57cec5SDimitry Andric ///
/// Work-Item Id Y (enable_vgpr_workitem_id > 0):
488*0b57cec5SDimitry Andric ///   Number of registers: 1. 32 bit work item id in Y dimension of work-group
489*0b57cec5SDimitry Andric ///   for wavefront lane.
490*0b57cec5SDimitry Andric ///
/// Work-Item Id Z (enable_vgpr_workitem_id > 1):
492*0b57cec5SDimitry Andric ///   Number of registers: 1. 32 bit work item id in Z dimension of work-group
493*0b57cec5SDimitry Andric ///   for wavefront lane.
494*0b57cec5SDimitry Andric ///
495*0b57cec5SDimitry Andric ///
496*0b57cec5SDimitry Andric /// The setting of registers is being done by existing GPU hardware as follows:
497*0b57cec5SDimitry Andric ///   1) SGPRs before the Work-Group Ids are set by CP using the 16 User Data
498*0b57cec5SDimitry Andric ///      registers.
499*0b57cec5SDimitry Andric ///   2) Work-group Id registers X, Y, Z are set by SPI which supports any
500*0b57cec5SDimitry Andric ///      combination including none.
501*0b57cec5SDimitry Andric ///   3) Scratch Wave Offset is also set by SPI which is why its value cannot
502*0b57cec5SDimitry Andric ///      be added into the value Flat Scratch Offset which would avoid the
503*0b57cec5SDimitry Andric ///      Finalizer generated prolog having to do the add.
504*0b57cec5SDimitry Andric ///   4) The VGPRs are set by SPI which only supports specifying either (X),
505*0b57cec5SDimitry Andric ///      (X, Y) or (X, Y, Z).
506*0b57cec5SDimitry Andric ///
/// Flat Scratch Dispatch Offset and Flat Scratch Size are adjacent SGPRs so
508*0b57cec5SDimitry Andric /// they can be moved as a 64 bit value to the hardware required SGPRn-3 and
509*0b57cec5SDimitry Andric /// SGPRn-4 respectively using the Finalizer ?FLAT_SCRATCH? Register.
510*0b57cec5SDimitry Andric ///
511*0b57cec5SDimitry Andric /// The global segment can be accessed either using flat operations or buffer
512*0b57cec5SDimitry Andric /// operations. If buffer operations are used then the Global Buffer used to
/// access HSAIL Global/Readonly/Kernarg (which are combined) segments using a
514*0b57cec5SDimitry Andric /// segment address is not passed into the kernel code by CP since its base
515*0b57cec5SDimitry Andric /// address is always 0. Instead the Finalizer generates prolog code to
516*0b57cec5SDimitry Andric /// initialize 4 SGPRs with a V# that has the following properties, and then
517*0b57cec5SDimitry Andric /// uses that in the buffer instructions:
518*0b57cec5SDimitry Andric ///   - base address of 0
519*0b57cec5SDimitry Andric ///   - no swizzle
520*0b57cec5SDimitry Andric ///   - ATC=1
521*0b57cec5SDimitry Andric ///   - MTYPE set to support memory coherence specified in
522*0b57cec5SDimitry Andric ///     amd_kernel_code_t.globalMemoryCoherence
523*0b57cec5SDimitry Andric ///
524*0b57cec5SDimitry Andric /// When the Global Buffer is used to access the Kernarg segment, must add the
525*0b57cec5SDimitry Andric /// dispatch packet kernArgPtr to a kernarg segment address before using this V#.
526*0b57cec5SDimitry Andric /// Alternatively scalar loads can be used if the kernarg offset is uniform, as
527*0b57cec5SDimitry Andric /// the kernarg segment is constant for the duration of the kernel execution.
528*0b57cec5SDimitry Andric ///
529*0b57cec5SDimitry Andric 
/// AMD Kernel Code object: fixed binary layout describing a finalized
/// kernel to the runtime and command processor (CP). Field order, types
/// and sizes are ABI — do not reorder or resize.
typedef struct amd_kernel_code_s {
  /// Format version of this amd_kernel_code_t object. Consumers should
  /// check these before interpreting the remaining fields.
  uint32_t amd_kernel_code_version_major;
  uint32_t amd_kernel_code_version_minor;

  /// Target machine the code was generated for: machine kind plus the
  /// ISA version triple (major.minor.stepping).
  uint16_t amd_machine_kind;
  uint16_t amd_machine_version_major;
  uint16_t amd_machine_version_minor;
  uint16_t amd_machine_version_stepping;

  /// Byte offset (possibly negative) from start of amd_kernel_code_t
  /// object to kernel's entry point instruction. The actual code for
  /// the kernel is required to be 256 byte aligned to match hardware
  /// requirements (SQ cache line is 16). The code must be position
  /// independent code (PIC) for AMD devices to give runtime the
  /// option of copying code to discrete GPU memory or APU L2
  /// cache. The Finalizer should endeavour to allocate all kernel
  /// machine code in contiguous memory pages so that a device
  /// pre-fetcher will tend to only pre-fetch Kernel Code objects,
  /// improving cache performance.
  int64_t kernel_code_entry_byte_offset;

  /// Range of bytes to consider prefetching expressed as an offset
  /// and size. The offset is from the start (possibly negative) of
  /// amd_kernel_code_t object. Set both to 0 if no prefetch
  /// information is available.
  int64_t kernel_code_prefetch_byte_offset;
  uint64_t kernel_code_prefetch_byte_size;

  /// Reserved. Must be 0.
  uint64_t reserved0;

  /// Shader program settings for CS. Contains COMPUTE_PGM_RSRC1 and
  /// COMPUTE_PGM_RSRC2 registers.
  uint64_t compute_pgm_resource_registers;

  /// Code properties. See amd_code_property_mask_t for a full list of
  /// properties.
  uint32_t code_properties;

  /// The amount of memory required for the combined private, spill
  /// and arg segments for a work-item in bytes. If
  /// is_dynamic_callstack is 1 then additional space must be added to
  /// this value for the call stack.
  uint32_t workitem_private_segment_byte_size;

  /// The amount of group segment memory required by a work-group in
  /// bytes. This does not include any dynamically allocated group
  /// segment memory that may be added when the kernel is
  /// dispatched.
  uint32_t workgroup_group_segment_byte_size;

  /// Number of byte of GDS required by kernel dispatch. Must be 0 if
  /// not using GDS.
  uint32_t gds_segment_byte_size;

  /// The size in bytes of the kernarg segment that holds the values
  /// of the arguments to the kernel. This could be used by CP to
  /// prefetch the kernarg segment pointed to by the dispatch packet.
  uint64_t kernarg_segment_byte_size;

  /// Number of fbarrier's used in the kernel and all functions it
  /// calls. If the implementation uses group memory to allocate the
  /// fbarriers then that amount must already be included in the
  /// workgroup_group_segment_byte_size total.
  uint32_t workgroup_fbarrier_count;

  /// Number of scalar registers used by a wavefront. This includes
  /// the special SGPRs for VCC, Flat Scratch Base, Flat Scratch Size
  /// and XNACK (for GFX8 (VI)). It does not include the 16 SGPR added if a
  /// trap handler is enabled. Used to set COMPUTE_PGM_RSRC1.SGPRS.
  uint16_t wavefront_sgpr_count;

  /// Number of vector registers used by each work-item. Used to set
  /// COMPUTE_PGM_RSRC1.VGPRS.
  uint16_t workitem_vgpr_count;

  /// If reserved_vgpr_count is 0 then must be 0. Otherwise, this is the
  /// first fixed VGPR number reserved.
  uint16_t reserved_vgpr_first;

  /// The number of consecutive VGPRs reserved by the client. If
  /// is_debug_supported then this count includes VGPRs reserved
  /// for debugger use.
  uint16_t reserved_vgpr_count;

  /// If reserved_sgpr_count is 0 then must be 0. Otherwise, this is the
  /// first fixed SGPR number reserved.
  uint16_t reserved_sgpr_first;

  /// The number of consecutive SGPRs reserved by the client. If
  /// is_debug_supported then this count includes SGPRs reserved
  /// for debugger use.
  uint16_t reserved_sgpr_count;

  /// If is_debug_supported is 0 then must be 0. Otherwise, this is the
  /// fixed SGPR number used to hold the wave scratch offset for the
  /// entire kernel execution, or uint16_t(-1) if the register is not
  /// used or not known.
  uint16_t debug_wavefront_private_segment_offset_sgpr;

  /// If is_debug_supported is 0 then must be 0. Otherwise, this is the
  /// fixed SGPR number of the first of 4 SGPRs used to hold the
  /// scratch V# used for the entire kernel execution, or uint16_t(-1)
  /// if the registers are not used or not known.
  uint16_t debug_private_segment_buffer_sgpr;

  /// The maximum byte alignment of variables used by the kernel in
  /// the specified memory segment. Expressed as a power of two. Must
  /// be at least HSA_POWERTWO_16.
  uint8_t kernarg_segment_alignment;
  uint8_t group_segment_alignment;
  uint8_t private_segment_alignment;

  /// Wavefront size expressed as a power of two. Must be a power of 2
  /// in range 1..64 inclusive. Used to support runtime query that
  /// obtains wavefront size, which may be used by application to
  /// allocated dynamic group memory and set the dispatch work-group
  /// size.
  uint8_t wavefront_size;

  /// Calling convention identifier for the kernel.
  /// NOTE(review): semantics are not defined in this file — confirm
  /// against the HSA code object ABI specification.
  int32_t call_convention;

  /// Reserved padding; presumably must be 0, by analogy with reserved0
  /// above — TODO confirm against the spec.
  uint8_t reserved3[12];

  /// NOTE(review): appears to be storage used by the runtime loader for
  /// this kernel's symbol; exact contents not defined here — verify
  /// against the HSA runtime loader documentation.
  uint64_t runtime_loader_kernel_symbol;

  /// 128 bytes holding the control directives in effect for this kernel.
  /// NOTE(review): layout presumably follows the HSA finalizer control
  /// directives structure (see hsa_ext_control_directive_present64_t
  /// earlier in this header) — confirm against the HSA extension spec.
  uint64_t control_directives[16];
} amd_kernel_code_t;
654*0b57cec5SDimitry Andric 
655*0b57cec5SDimitry Andric #endif // AMDKERNELCODET_H
656