/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");

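/* size in bytes of the per-queue HPD/EOP buffer allocated in
 * gfx_v9_4_3_mec_init() below
 */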
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define XCC_REG_RANGE_0_LOW  0x2000	/* XCC gfxdec0 lower Bound */
#define XCC_REG_RANGE_0_HIGH 0x3400	/* XCC gfxdec0 upper Bound */
#define XCC_REG_RANGE_1_LOW  0xA000	/* XCC gfxdec1 lower Bound */
#define XCC_REG_RANGE_1_HIGH 0x10000	/* XCC gfxdec1 upper Bound */

#define NORMALIZE_XCC_REG_OFFSET(offset) \
	(offset & 0xFFFF)
static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
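	/* CP_MEC_ME1_HEADER_DUMP behaves like a FIFO: each read returns the
	 * next header-dump entry, so the register is listed eight times
	 */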
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					 uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask: 0, queue_type: 0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
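	/* engine_sel 4 selects the gfx engine and 0 the compute engine;
	 * this IP only exposes compute rings, so this resolves to 0
	 */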
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					enum amdgpu_unmap_queues_action action,
					u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* action, queue_sel, eng_sel, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					u64 addr,
					u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* doorbell offset and engine select of the queried queue */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					  uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
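		/* dequeue request type 2 (reset waves), then ask SPI to
		 * reset the compute queue
		 */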
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait until the dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait for HQD deactivation\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
	}
}

static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC reg, normalize the reg to keep
	 * the lower 16 bits local to the xcc
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
	    ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					 bool wc, uint32_t reg, uint32_t val)
{
	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				    int mem_space, int opt, uint32_t addr0,
				    uint32_t addr1, uint32_t ref, uint32_t mask,
				    uint32_t inv)
{
	/* Only do the normalization on register space */
	if (mem_space == 0) {
		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |	 /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
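	/* read back so the posted scratch write lands before the ring test */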
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

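	/* WRITE_DATA packet: dst_sel 5 writes to memory with write-confirm,
	 * followed by the 64-bit GPU address and one payload dword
	 */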
	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
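	/* writing 1 latches a snapshot of the 64-bit GPU clock counter so the
	 * LSB/MSB reads below are coherent
	 */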
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					 const char *chip_name)
{
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
						const char *chip_name)
{
	int err;

	if (amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_sjt_mec.bin", chip_name);

		if (err)
			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_mec.bin", chip_name);
	} else
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_mec.bin", chip_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

	return gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

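		/* on emulation, clear the buffer one dword at a time and
		 * yield periodically so the slow emulated writes don't hog
		 * the CPU
		 */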
		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	u32 xcp_ctl;

	/* Value is expected to be the same on all, fetch from first instance */
	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}

static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
					       int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
					    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
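		/* without PSP, program the partition layout directly into
		 * CP_HYP_XCP_CTL on each XCC
		 */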
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

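	/* two IH nodes map to each XCC: ih_node / 2 is the physical XCC
	 * index; counting the enabled XCCs up to and including it yields the
	 * logical index
	 */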
	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
};

static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
				      struct aca_bank *bank, enum aca_smu_type type,
				      void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	u32 instlo;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* NOTE: overwrite info.die_id with xcd id for gfx */
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];

	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	adev->gfx.config.max_hw_contexts = 8;
	adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
	adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
	adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
	adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
	adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
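	/* the doorbell_index fields in adev are in 64-bit slots while
	 * ring->doorbell_index is a dword index, hence the shift by one
	 */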
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
		ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
	uint32_t *ptr, num_xcc, inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}
}

static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
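		/* the cleaner shader is only usable with MEC firmware 153+ */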
		if (adev->gfx.mec_fw_version >= 153) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
								         ring_id,
								         xcc_id,
								         i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	adev->gfx.compute_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
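	/* newer MEC firmware additionally supports per-queue and per-pipe
	 * resets, but only on bare metal
	 */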
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		if ((adev->gfx.mec_fw_version >= 155) &&
		    !amdgpu_sriov_vf(adev)) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	case IP_VERSION(9, 5, 0):
		if ((adev->gfx.mec_fw_version >= 21) &&
		    !amdgpu_sriov_vf(adev)) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	default:
		break;
	}
	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	gfx_v9_4_3_alloc_ip_dump(adev);

	return 0;
}

static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	amdgpu_gfx_cleaner_shader_sw_fini(adev);

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	amdgpu_gfx_sysfs_fini(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:		0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:	0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:	0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

/* For ASICs that need the xnack chain and whose MEC version supports it, set
 * the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform KFD to
 * set the xnack_chain bit in SET_RESOURCES
 */
static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
	data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1, data);
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	/* ToDo: GC 9.4.4 */
	case IP_VERSION(9, 4, 3):
		if (adev->gfx.mec_fw_version >= 184 &&
		    (amdgpu_sriov_reg_access_sq_config(adev) ||
		     !amdgpu_sriov_vf(adev)))
			adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	case IP_VERSION(9, 5, 0):
		if (adev->gfx.mec_fw_version >= 23)
			adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	default:
		break;
	}

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list is usable since RLC firmware header v2_1.
	 */
	gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

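	/* MESSAGE 1 requests safe-mode entry; the RLC clears the CMD bit once
	 * the request is acknowledged, which the loop below polls for
	 */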
	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

1519 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1520 {
1521 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1522 RLC_ENABLE_F32, 0);
1523 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1524 gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1525 }
1526
1527 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1528 {
1529 int i, num_xcc;
1530
1531 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1532 for (i = 0; i < num_xcc; i++)
1533 gfx_v9_4_3_xcc_rlc_stop(adev, i);
1534 }
1535
1536 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1537 {
1538 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1539 SOFT_RESET_RLC, 1);
1540 udelay(50);
1541 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1542 SOFT_RESET_RLC, 0);
1543 udelay(50);
1544 }
1545
1546 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1547 {
1548 int i, num_xcc;
1549
1550 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1551 for (i = 0; i < num_xcc; i++)
1552 gfx_v9_4_3_xcc_rlc_reset(adev, i);
1553 }
1554
1555 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1556 {
1557 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1558 RLC_ENABLE_F32, 1);
1559 udelay(50);
1560
1561 /* on APUs (like Carrizo), the CP interrupt is enabled after CP init */
1562 if (!(adev->flags & AMD_IS_APU)) {
1563 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1564 udelay(50);
1565 }
1566 }
1567
1568 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1569 {
1570 #ifdef AMDGPU_RLC_DEBUG_RETRY
1571 u32 rlc_ucode_ver;
1572 #endif
1573 int i, num_xcc;
1574
1575 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1576 for (i = 0; i < num_xcc; i++) {
1577 gfx_v9_4_3_xcc_rlc_start(adev, i);
1578 #ifdef AMDGPU_RLC_DEBUG_RETRY
1579 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
1580 rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1581 if (rlc_ucode_ver == 0x108) {
1582 dev_info(adev->dev,
1583 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
1584 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1585 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1586 * default is 0x9C4 to create a 100us interval */
1587 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1588 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1589 * to disable the page fault retry interrupts, default is
1590 * 0x100 (256) */
1591 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1592 }
1593 #endif
1594 }
1595 }
1596
1597 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1598 int xcc_id)
1599 {
1600 const struct rlc_firmware_header_v2_0 *hdr;
1601 const __le32 *fw_data;
1602 unsigned i, fw_size;
1603
1604 if (!adev->gfx.rlc_fw)
1605 return -EINVAL;
1606
1607 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1608 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1609
1610 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1611 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1612 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1613
1614 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1615 RLCG_UCODE_LOADING_START_ADDRESS);
1616 for (i = 0; i < fw_size; i++) {
1617 if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1618 dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
1619 msleep(1);
1620 }
1621 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1622 }
1623 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1624
1625 return 0;
1626 }
1627
1628 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1629 {
1630 int r;
1631
1632 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1633 gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1634 /* legacy rlc firmware loading */
1635 r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1636 if (r)
1637 return r;
1638 gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1639 }
1640
1641 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1642 /* disable CG */
1643 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1644 gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1645 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1646
1647 return 0;
1648 }
1649
1650 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1651 {
1652 int r, i, num_xcc;
1653
1654 if (amdgpu_sriov_vf(adev))
1655 return 0;
1656
1657 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1658 for (i = 0; i < num_xcc; i++) {
1659 r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1660 if (r)
1661 return r;
1662 }
1663
1664 return 0;
1665 }
1666
1667 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1668 unsigned vmid)
1669 {
1670 u32 reg, pre_data, data;
1671
1672 reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
1673 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
1674 pre_data = RREG32_NO_KIQ(reg);
1675 else
1676 pre_data = RREG32(reg);
1677
1678 data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
1679 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
1680
1681 if (pre_data != data) {
1682 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
1683 WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1684 } else
1685 WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1686 }
1687 }
1688
1689 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1690 {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1691 {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1692 };
1693
1694 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1695 uint32_t offset,
1696 struct soc15_reg_rlcg *entries, int arr_size)
1697 {
1698 int i, inst;
1699 uint32_t reg;
1700
1701 if (!entries)
1702 return false;
1703
1704 for (i = 0; i < arr_size; i++) {
1705 const struct soc15_reg_rlcg *entry;
1706
1707 entry = &entries[i];
1708 inst = adev->ip_map.logical_to_dev_inst ?
1709 adev->ip_map.logical_to_dev_inst(
1710 adev, entry->hwip, entry->instance) :
1711 entry->instance;
1712 reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1713 entry->reg;
1714 if (offset == reg)
1715 return true;
1716 }
1717
1718 return false;
1719 }
1720
1721 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1722 {
1723 return gfx_v9_4_3_check_rlcg_range(adev, offset,
1724 (void *)rlcg_access_gc_9_4_3,
1725 ARRAY_SIZE(rlcg_access_gc_9_4_3));
1726 }
1727
1728 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1729 bool enable, int xcc_id)
1730 {
1731 if (enable) {
1732 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1733 } else {
1734 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1735 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
1736 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
1737 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
1738 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
1739 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
1740 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
1741 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
1742 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
1743 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1744 adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1745 }
1746 udelay(50);
1747 }
1748
1749 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1750 int xcc_id)
1751 {
1752 const struct gfx_firmware_header_v1_0 *mec_hdr;
1753 const __le32 *fw_data;
1754 unsigned i;
1755 u32 tmp;
1756 u32 mec_ucode_addr_offset;
1757 u32 mec_ucode_data_offset;
1758
1759 if (!adev->gfx.mec_fw)
1760 return -EINVAL;
1761
1762 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1763
1764 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1765 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1766
1767 fw_data = (const __le32 *)
1768 (adev->gfx.mec_fw->data +
1769 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1770 tmp = 0;
1771 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1772 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1773 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1774
1775 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1776 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1777 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1778 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1779
1780 mec_ucode_addr_offset =
1781 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1782 mec_ucode_data_offset =
1783 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1784
1785 /* MEC1 */
1786 WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1787 for (i = 0; i < mec_hdr->jt_size; i++)
1788 WREG32(mec_ucode_data_offset,
1789 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
1790
1791 WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
1792 /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
1793
1794 return 0;
1795 }
1796
1797 /* KIQ functions */
1798 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1799 {
1800 uint32_t tmp;
1801 struct amdgpu_device *adev = ring->adev;
1802
1803 /* tell the RLC which queue is the KIQ */
1804 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1805 tmp &= 0xffffff00;
1806 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1807 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
1808 }
1809
1810 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1811 {
1812 struct amdgpu_device *adev = ring->adev;
1813
1814 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1815 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1816 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1817 mqd->cp_hqd_queue_priority =
1818 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1819 }
1820 }
1821 }
1822
1823 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1824 {
1825 struct amdgpu_device *adev = ring->adev;
1826 struct v9_mqd *mqd = ring->mqd_ptr;
1827 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1828 uint32_t tmp;
1829
1830 mqd->header = 0xC0310800;
1831 mqd->compute_pipelinestat_enable = 0x00000001;
1832 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1833 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1834 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1835 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1836 mqd->compute_misc_reserved = 0x00000003;
1837
1838 mqd->dynamic_cu_mask_addr_lo =
1839 lower_32_bits(ring->mqd_gpu_addr
1840 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1841 mqd->dynamic_cu_mask_addr_hi =
1842 upper_32_bits(ring->mqd_gpu_addr
1843 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1844
1845 eop_base_addr = ring->eop_gpu_addr >> 8;
1846 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1847 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1848
1849 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1850 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1851 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1852 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
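	/* e.g. GFX9_MEC_HPD_SIZE = 4096 bytes = 1024 dwords, so EOP_SIZE
	 * is order_base_2(1024) - 1 = 9, i.e. 2^(9+1) = 1024 dwords
	 */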
1853
1854 mqd->cp_hqd_eop_control = tmp;
1855
1856 /* enable doorbell? */
1857 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1858
1859 if (ring->use_doorbell) {
1860 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1861 DOORBELL_OFFSET, ring->doorbell_index);
1862 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1863 DOORBELL_EN, 1);
1864 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1865 DOORBELL_SOURCE, 0);
1866 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1867 DOORBELL_HIT, 0);
1868 if (amdgpu_sriov_multi_vf_mode(adev))
1869 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1870 DOORBELL_MODE, 1);
1871 } else {
1872 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1873 DOORBELL_EN, 0);
1874 }
1875
1876 mqd->cp_hqd_pq_doorbell_control = tmp;
1877
1878 /* disable the queue if it's active */
1879 ring->wptr = 0;
1880 mqd->cp_hqd_dequeue_request = 0;
1881 mqd->cp_hqd_pq_rptr = 0;
1882 mqd->cp_hqd_pq_wptr_lo = 0;
1883 mqd->cp_hqd_pq_wptr_hi = 0;
1884
1885 /* set the pointer to the MQD */
1886 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1887 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1888
1889 /* set MQD vmid to 0 */
1890 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1891 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1892 mqd->cp_mqd_control = tmp;
1893
1894 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1895 hqd_gpu_addr = ring->gpu_addr >> 8;
1896 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1897 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1898
1899 /* set up the HQD, this is similar to CP_RB0_CNTL */
1900 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1901 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1902 (order_base_2(ring->ring_size / 4) - 1));
1903 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1904 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
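	/* with 4 KiB GPU pages: order_base_2(4096 / 4) - 1 = 9 */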
1905 #ifdef __BIG_ENDIAN
1906 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1907 #endif
1908 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1909 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1910 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1911 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1912 mqd->cp_hqd_pq_control = tmp;
1913
1914 /* set the wb address whether it's enabled or not */
1915 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1916 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1917 mqd->cp_hqd_pq_rptr_report_addr_hi =
1918 upper_32_bits(wb_gpu_addr) & 0xffff;
1919
1920 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1921 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1922 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1923 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1924
1925 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1926 ring->wptr = 0;
1927 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1928
1929 /* set the vmid for the queue */
1930 mqd->cp_hqd_vmid = 0;
1931
1932 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1933 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1934 mqd->cp_hqd_persistent_state = tmp;
1935
1936 /* set MIN_IB_AVAIL_SIZE */
1937 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1938 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1939 mqd->cp_hqd_ib_control = tmp;
1940
1941 /* set static priority for a queue/ring */
1942 gfx_v9_4_3_mqd_set_priority(ring, mqd);
1943 mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1944
1945 /* the map_queues packet doesn't need to activate the queue,
1946 * so only the KIQ needs this field set.
1947 */
1948 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1949 mqd->cp_hqd_active = 1;
1950
1951 return 0;
1952 }
1953
1954 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1955 int xcc_id)
1956 {
1957 struct amdgpu_device *adev = ring->adev;
1958 struct v9_mqd *mqd = ring->mqd_ptr;
1959 int j;
1960
1961 /* disable wptr polling */
1962 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1963
1964 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1965 mqd->cp_hqd_eop_base_addr_lo);
1966 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1967 mqd->cp_hqd_eop_base_addr_hi);
1968
1969 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1970 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1971 mqd->cp_hqd_eop_control);
1972
1973 /* enable doorbell? */
1974 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1975 mqd->cp_hqd_pq_doorbell_control);
1976
1977 /* disable the queue if it's active */
1978 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1979 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1980 for (j = 0; j < adev->usec_timeout; j++) {
1981 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1982 break;
1983 udelay(1);
1984 }
1985 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1986 mqd->cp_hqd_dequeue_request);
1987 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1988 mqd->cp_hqd_pq_rptr);
1989 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1990 mqd->cp_hqd_pq_wptr_lo);
1991 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1992 mqd->cp_hqd_pq_wptr_hi);
1993 }
1994
1995 /* set the pointer to the MQD */
1996 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1997 mqd->cp_mqd_base_addr_lo);
1998 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1999 mqd->cp_mqd_base_addr_hi);
2000
2001 /* set MQD vmid to 0 */
2002 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2003 mqd->cp_mqd_control);
2004
2005 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2006 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2007 mqd->cp_hqd_pq_base_lo);
2008 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2009 mqd->cp_hqd_pq_base_hi);
2010
2011 /* set up the HQD, this is similar to CP_RB0_CNTL */
2012 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2013 mqd->cp_hqd_pq_control);
2014
2015 /* set the wb address whether it's enabled or not */
2016 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2017 mqd->cp_hqd_pq_rptr_report_addr_lo);
2018 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2019 mqd->cp_hqd_pq_rptr_report_addr_hi);
2020
2021 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2022 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2023 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2024 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2025 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2026
2027 /* enable the doorbell if requested */
2028 if (ring->use_doorbell) {
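		/* doorbell_index values count 64-bit doorbell slots, and each
		 * XCC is offset by its xcc_doorbell_range; "* 2" converts to
		 * a 32-bit dword index and "<< 2" to the byte offset that the
		 * range registers expect
		 */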
2029 WREG32_SOC15(
2030 GC, GET_INST(GC, xcc_id),
2031 regCP_MEC_DOORBELL_RANGE_LOWER,
2032 ((adev->doorbell_index.kiq +
2033 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2034 2) << 2);
2035 WREG32_SOC15(
2036 GC, GET_INST(GC, xcc_id),
2037 regCP_MEC_DOORBELL_RANGE_UPPER,
2038 ((adev->doorbell_index.userqueue_end +
2039 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2040 2) << 2);
2041 }
2042
2043 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2044 mqd->cp_hqd_pq_doorbell_control);
2045
2046 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2047 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2048 mqd->cp_hqd_pq_wptr_lo);
2049 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2050 mqd->cp_hqd_pq_wptr_hi);
2051
2052 /* set the vmid for the queue */
2053 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2054
2055 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2056 mqd->cp_hqd_persistent_state);
2057
2058 /* activate the queue */
2059 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2060 mqd->cp_hqd_active);
2061
2062 if (ring->use_doorbell)
2063 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2064
2065 return 0;
2066 }
2067
2068 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
2069 int xcc_id)
2070 {
2071 struct amdgpu_device *adev = ring->adev;
2072 int j;
2073
2074 /* disable the queue if it's active */
2075 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2076
2077 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2078
2079 for (j = 0; j < adev->usec_timeout; j++) {
2080 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2081 break;
2082 udelay(1);
2083 }
2084
2085 if (j == adev->usec_timeout) {
2086 DRM_DEBUG("%s dequeue request failed.\n", ring->name);
2087
2088 /* Manual disable if dequeue request times out */
2089 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2090 }
2091
2092 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2093 0);
2094 }
2095
2096 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
2097 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
2098 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
2099 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2100 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
2101 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
2102 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
2103 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
2104
2105 return 0;
2106 }
2107
2108 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
2109 {
2110 struct amdgpu_device *adev = ring->adev;
2111 struct v9_mqd *mqd = ring->mqd_ptr;
2112 struct v9_mqd *tmp_mqd;
2113
2114 gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
2115
2116 /* The GPU could be in a bad state during probe, and the driver may
2117 * trigger a reset after loading the SMU; in that case the MQD was
2118 * never initialized and the driver needs to re-init it.
2119 * Check mqd->cp_hqd_pq_control, since this value should not be 0.
2120 */
2121 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
2122 if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
2123 /* for GPU_RESET case , reset MQD to a clean status */
2124 if (adev->gfx.kiq[xcc_id].mqd_backup)
2125 memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
2126
2127 /* reset ring buffer */
2128 ring->wptr = 0;
2129 amdgpu_ring_clear_ring(ring);
2130 mutex_lock(&adev->srbm_mutex);
2131 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2132 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2133 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2134 mutex_unlock(&adev->srbm_mutex);
2135 } else {
2136 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2137 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2138 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2139 mutex_lock(&adev->srbm_mutex);
2140 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2141 amdgpu_ring_clear_ring(ring);
2142 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2143 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2144 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2145 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2146 mutex_unlock(&adev->srbm_mutex);
2147
2148 if (adev->gfx.kiq[xcc_id].mqd_backup)
2149 memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
2150 }
2151
2152 return 0;
2153 }
2154
2155 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
2156 {
2157 struct amdgpu_device *adev = ring->adev;
2158 struct v9_mqd *mqd = ring->mqd_ptr;
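	/* index of this ring in the flat compute_ring array; selects the
	 * matching mqd_backup slot below
	 */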
2159 int mqd_idx = ring - &adev->gfx.compute_ring[0];
2160 struct v9_mqd *tmp_mqd;
2161
2162 /* Same as the KIQ init above: the driver needs to re-init the MQD
2163 * if mqd->cp_hqd_pq_control shows it was never initialized
2164 */
2165 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
2166
2167 if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
2168 (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
2169 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2170 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2171 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2172 mutex_lock(&adev->srbm_mutex);
2173 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2174 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2175 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2176 mutex_unlock(&adev->srbm_mutex);
2177
2178 if (adev->gfx.mec.mqd_backup[mqd_idx])
2179 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2180 } else {
2181 /* restore MQD to a clean status */
2182 if (adev->gfx.mec.mqd_backup[mqd_idx])
2183 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2184 /* reset ring buffer */
2185 ring->wptr = 0;
2186 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
2187 amdgpu_ring_clear_ring(ring);
2188 }
2189
2190 return 0;
2191 }
2192
2193 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
2194 {
2195 struct amdgpu_ring *ring;
2196 int j;
2197
2198 for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2199 ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
2200 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2201 mutex_lock(&adev->srbm_mutex);
2202 soc15_grbm_select(adev, ring->me,
2203 ring->pipe,
2204 ring->queue, 0, GET_INST(GC, xcc_id));
2205 gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
2206 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2207 mutex_unlock(&adev->srbm_mutex);
2208 }
2209 }
2210
2211 return 0;
2212 }
2213
2214 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
2215 {
2216 gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id);
2217 return 0;
2218 }
2219
2220 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
2221 {
2222 struct amdgpu_ring *ring;
2223 int i, r;
2224
2225 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
2226
2227 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2228 ring = &adev->gfx.compute_ring[i + xcc_id *
2229 adev->gfx.num_compute_rings];
2230
2231 r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
2232 if (r)
2233 return r;
2234 }
2235
2236 return amdgpu_gfx_enable_kcq(adev, xcc_id);
2237 }
2238
2239 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
2240 {
2241 struct amdgpu_ring *ring;
2242 int r, j;
2243
2244 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2245
2246 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2247 gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
2248
2249 r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
2250 if (r)
2251 return r;
2252 } else {
2253 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2254 }
2255
2256 r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
2257 if (r)
2258 return r;
2259
2260 r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
2261 if (r)
2262 return r;
2263
2264 for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2265 ring = &adev->gfx.compute_ring
2266 [j + xcc_id * adev->gfx.num_compute_rings];
2267 r = amdgpu_ring_test_helper(ring);
2268 if (r)
2269 return r;
2270 }
2271
2272 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2273
2274 return 0;
2275 }
2276
2277 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2278 {
2279 int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
2280
2281 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2282 if (amdgpu_sriov_vf(adev)) {
2283 enum amdgpu_gfx_partition mode;
2284
2285 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2286 AMDGPU_XCP_FL_NONE);
2287 if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2288 return -EINVAL;
2289 num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
2290 adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2291 num_xcp = num_xcc / num_xcc_per_xcp;
2292 r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2293
2294 } else {
2295 if (adev->in_suspend)
2296 amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
2297 else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2298 AMDGPU_XCP_FL_NONE) ==
2299 AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2300 r = amdgpu_xcp_switch_partition_mode(
2301 adev->xcp_mgr, amdgpu_user_partt_mode);
2302 }
2303 if (r)
2304 return r;
2305
2306 for (i = 0; i < num_xcc; i++) {
2307 r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2308 if (r)
2309 return r;
2310 }
2311
2312 return 0;
2313 }
2314
2315 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2316 {
2317 if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2318 DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2319
2320 if (amdgpu_sriov_vf(adev)) {
2321 /* must disable polling for SRIOV when hw is finished, otherwise
2322 * the CPC engine may keep fetching a WB address that is already
2323 * invalid after sw is finished, triggering a DMAR read error on
2324 * the hypervisor side.
2325 */
2326 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2327 return;
2328 }
2329
2330 /* Use deinitialize sequence from CAIL when unbinding device
2331 * from the driver, otherwise the KIQ hangs when binding back
2332 */
2333 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2334 mutex_lock(&adev->srbm_mutex);
2335 soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2336 adev->gfx.kiq[xcc_id].ring.pipe,
2337 adev->gfx.kiq[xcc_id].ring.queue, 0,
2338 GET_INST(GC, xcc_id));
2339 gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2340 xcc_id);
2341 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2342 mutex_unlock(&adev->srbm_mutex);
2343 }
2344
2345 gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2346 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2347 }
2348
2349 static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
2350 {
2351 int r;
2352 struct amdgpu_device *adev = ip_block->adev;
2353
2354 amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
2355 adev->gfx.cleaner_shader_ptr);
2356
2357 if (!amdgpu_sriov_vf(adev))
2358 gfx_v9_4_3_init_golden_registers(adev);
2359
2360 gfx_v9_4_3_constants_init(adev);
2361
2362 r = adev->gfx.rlc.funcs->resume(adev);
2363 if (r)
2364 return r;
2365
2366 r = gfx_v9_4_3_cp_resume(adev);
2367 if (r)
2368 return r;
2369
2370 return r;
2371 }
2372
2373 static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
2374 {
2375 struct amdgpu_device *adev = ip_block->adev;
2376 int i, num_xcc;
2377
2378 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2379 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2380 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
2381
2382 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2383 for (i = 0; i < num_xcc; i++) {
2384 gfx_v9_4_3_xcc_fini(adev, i);
2385 }
2386
2387 return 0;
2388 }
2389
2390 static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
2391 {
2392 return gfx_v9_4_3_hw_fini(ip_block);
2393 }
2394
2395 static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
2396 {
2397 return gfx_v9_4_3_hw_init(ip_block);
2398 }
2399
2400 static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block)
2401 {
2402 struct amdgpu_device *adev = ip_block->adev;
2403 int i, num_xcc;
2404
2405 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2406 for (i = 0; i < num_xcc; i++) {
2407 if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2408 GRBM_STATUS, GUI_ACTIVE))
2409 return false;
2410 }
2411 return true;
2412 }
2413
2414 static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
2415 {
2416 unsigned i;
2417 struct amdgpu_device *adev = ip_block->adev;
2418
2419 for (i = 0; i < adev->usec_timeout; i++) {
2420 if (gfx_v9_4_3_is_idle(ip_block))
2421 return 0;
2422 udelay(1);
2423 }
2424 return -ETIMEDOUT;
2425 }
2426
2427 static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
2428 {
2429 u32 grbm_soft_reset = 0;
2430 u32 tmp;
2431 struct amdgpu_device *adev = ip_block->adev;
2432
2433 /* GRBM_STATUS */
2434 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2435 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2436 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2437 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2438 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2439 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2440 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2441 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2442 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2443 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2444 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2445 }
2446
2447 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2448 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2449 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2450 }
2451
2452 /* GRBM_STATUS2 */
2453 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2454 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2455 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2456 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2457
2458
2459 if (grbm_soft_reset) {
2460 /* stop the rlc */
2461 adev->gfx.rlc.funcs->stop(adev);
2462
2463 /* Disable MEC parsing/prefetching */
2464 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2465
2466 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2467 tmp |= grbm_soft_reset;
2468 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2469 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2470 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2471
2472 udelay(50);
2473
2474 tmp &= ~grbm_soft_reset;
2475 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2476 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2477
2478 /* Wait a little for things to settle down */
2479 udelay(50);
2480 }
2481 return 0;
2482 }
2483
2484 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2485 uint32_t vmid,
2486 uint32_t gds_base, uint32_t gds_size,
2487 uint32_t gws_base, uint32_t gws_size,
2488 uint32_t oa_base, uint32_t oa_size)
2489 {
2490 struct amdgpu_device *adev = ring->adev;
2491
2492 /* GDS Base */
2493 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2494 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2495 gds_base);
2496
2497 /* GDS Size */
2498 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2499 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2500 gds_size);
2501
2502 /* GWS */
2503 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2504 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2505 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2506
2507 /* OA */
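	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
	 * mask of oa_size bits starting at bit oa_base
	 */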
2508 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2509 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2510 (1 << (oa_size + oa_base)) - (1 << oa_base));
2511 }
2512
2513 static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
2514 {
2515 struct amdgpu_device *adev = ip_block->adev;
2516
2517 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2518 AMDGPU_MAX_COMPUTE_RINGS);
2519 gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2520 gfx_v9_4_3_set_ring_funcs(adev);
2521 gfx_v9_4_3_set_irq_funcs(adev);
2522 gfx_v9_4_3_set_gds_init(adev);
2523 gfx_v9_4_3_set_rlc_funcs(adev);
2524
2525 /* init rlcg reg access ctrl */
2526 gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2527
2528 return gfx_v9_4_3_init_microcode(adev);
2529 }
2530
2531 static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
2532 {
2533 struct amdgpu_device *adev = ip_block->adev;
2534 int r;
2535
2536 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2537 if (r)
2538 return r;
2539
2540 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2541 if (r)
2542 return r;
2543
2544 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
2545 if (r)
2546 return r;
2547
2548 if (adev->gfx.ras &&
2549 adev->gfx.ras->enable_watchdog_timer)
2550 adev->gfx.ras->enable_watchdog_timer(adev);
2551
2552 return 0;
2553 }
2554
2555 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2556 bool enable, int xcc_id)
2557 {
2558 uint32_t def, data;
2559
2560 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2561 return;
2562
2563 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2564 regRLC_CGTT_MGCG_OVERRIDE);
2565
2566 if (enable)
2567 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2568 else
2569 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2570
2571 if (def != data)
2572 WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2573 regRLC_CGTT_MGCG_OVERRIDE, data);
2574
2575 }
2576
2577 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2578 bool enable, int xcc_id)
2579 {
2580 uint32_t def, data;
2581
2582 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2583 return;
2584
2585 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2586 regRLC_CGTT_MGCG_OVERRIDE);
2587
2588 if (enable)
2589 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2590 else
2591 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2592
2593 if (def != data)
2594 WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2595 regRLC_CGTT_MGCG_OVERRIDE, data);
2596 }
2597
2598 static void
2599 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2600 bool enable, int xcc_id)
2601 {
2602 uint32_t data, def;
2603
2604 /* It is disabled by HW by default */
2605 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2606 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
2607 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2608
2609 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2610 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2611 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2612 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2613
2614 if (def != data)
2615 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2616
2617 /* MGLS is a global flag to control all MGLS in GFX */
2618 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2619 /* 2 - RLC memory Light sleep */
2620 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2621 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2622 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2623 if (def != data)
2624 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2625 }
2626 /* 3 - CP memory Light sleep */
2627 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2628 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2629 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2630 if (def != data)
2631 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2632 }
2633 }
2634 } else {
2635 /* 1 - MGCG_OVERRIDE */
2636 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2637
2638 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2639 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2640 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2641 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2642
2643 if (def != data)
2644 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2645
2646 /* 2 - disable MGLS in RLC */
2647 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2648 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2649 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2650 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2651 }
2652
2653 /* 3 - disable MGLS in CP */
2654 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2655 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2656 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2657 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2658 }
2659 }
2660
2661 }
2662
2663 static void
2664 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2665 bool enable, int xcc_id)
2666 {
2667 uint32_t def, data;
2668
2669 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2670
2671 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2672 /* unset CGCG override */
2673 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2674 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2675 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2676 else
2677 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2678 /* update CGCG and CGLS override bits */
2679 if (def != data)
2680 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2681
2682 /* CGCG Hysteresis: 400us */
2683 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2684
2685 data = (0x2710
2686 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2687 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
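		/* 0x2710 = 10000 RefCLK cycles (~400 us assuming a 25 MHz
		 * reference clock)
		 */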
2688 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2689 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2690 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2691 if (def != data)
2692 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2693
2694 /* set IDLE_POLL_COUNT (0x33450100) */
2695 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2696 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2697 (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2698 if (def != data)
2699 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2700 } else {
2701 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2702 /* reset CGCG/CGLS bits */
2703 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2704 /* disable cgcg and cgls in FSM */
2705 if (def != data)
2706 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2707 }
2708
2709 }
2710
2711 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2712 bool enable, int xcc_id)
2713 {
2714 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2715
2716 if (enable) {
2717 /* FGCG */
2718 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2719 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2720
2721 /* CGCG/CGLS should be enabled after MGCG/MGLS
2722 * === MGCG + MGLS ===
2723 */
2724 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2725 xcc_id);
2726 /* === CGCG + CGLS === */
2727 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2728 xcc_id);
2729 } else {
2730 /* CGCG/CGLS should be disabled before MGCG/MGLS
2731 * === CGCG + CGLS ===
2732 */
2733 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2734 xcc_id);
2735 /* === MGCG + MGLS === */
2736 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2737 xcc_id);
2738
2739 /* FGCG */
2740 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2741 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2742 }
2743
2744 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2745
2746 return 0;
2747 }
2748
2749 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2750 .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2751 .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2752 .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2753 .init = gfx_v9_4_3_rlc_init,
2754 .resume = gfx_v9_4_3_rlc_resume,
2755 .stop = gfx_v9_4_3_rlc_stop,
2756 .reset = gfx_v9_4_3_rlc_reset,
2757 .start = gfx_v9_4_3_rlc_start,
2758 .update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2759 .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2760 };
2761
2762 static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
2763 enum amd_powergating_state state)
2764 {
2765 return 0;
2766 }
2767
2768 static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2769 enum amd_clockgating_state state)
2770 {
2771 struct amdgpu_device *adev = ip_block->adev;
2772 int i, num_xcc;
2773
2774 if (amdgpu_sriov_vf(adev))
2775 return 0;
2776
2777 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2778 for (i = 0; i < num_xcc; i++)
2779 gfx_v9_4_3_xcc_update_gfx_clock_gating(
2780 adev, state == AMD_CG_STATE_GATE, i);
2781
2782 return 0;
2783 }
2784
2785 static void gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
2786 {
2787 struct amdgpu_device *adev = ip_block->adev;
2788 int data;
2789
2790 if (amdgpu_sriov_vf(adev))
2791 *flags = 0;
2792
2793 /* AMD_CG_SUPPORT_GFX_MGCG */
2794 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2795 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2796 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
2797
2798 /* AMD_CG_SUPPORT_GFX_CGCG */
2799 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2800 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2801 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
2802
2803 /* AMD_CG_SUPPORT_GFX_CGLS */
2804 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2805 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
2806
2807 /* AMD_CG_SUPPORT_GFX_RLC_LS */
2808 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2809 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2810 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2811
2812 /* AMD_CG_SUPPORT_GFX_CP_LS */
2813 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2814 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2815 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2816 }
2817
2818 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2819 {
2820 struct amdgpu_device *adev = ring->adev;
2821 u32 ref_and_mask, reg_mem_engine;
2822 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2823
2824 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2825 switch (ring->me) {
2826 case 1:
2827 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2828 break;
2829 case 2:
2830 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2831 break;
2832 default:
2833 return;
2834 }
2835 reg_mem_engine = 0;
2836 } else {
2837 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2838 reg_mem_engine = 1; /* pfp */
2839 }
2840
2841 gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2842 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2843 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2844 ref_and_mask, ref_and_mask, 0x20);
2845 }
2846
2847 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2848 struct amdgpu_job *job,
2849 struct amdgpu_ib *ib,
2850 uint32_t flags)
2851 {
2852 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2853 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2854
2855 /* Currently, there is a high possibility to get wave ID mismatch
2856 * between ME and GDS, leading to a hw deadlock, because ME generates
2857 * different wave IDs than the GDS expects. This situation happens
2858 * randomly when at least 5 compute pipes use GDS ordered append.
2859 * The wave IDs generated by ME are also wrong after suspend/resume.
2860 * Those are probably bugs somewhere else in the kernel driver.
2861 *
2862 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2863 * GDS to 0 for this ring (me/pipe).
2864 */
2865 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2866 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2867 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2868 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2869 }
2870
2871 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2872 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2873 amdgpu_ring_write(ring,
2874 #ifdef __BIG_ENDIAN
2875 (2 << 0) |
2876 #endif
2877 lower_32_bits(ib->gpu_addr));
2878 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2879 amdgpu_ring_write(ring, control);
2880 }
2881
2882 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2883 u64 seq, unsigned flags)
2884 {
2885 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2886 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2887 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2888
2889 /* RELEASE_MEM - flush caches, send int */
2890 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2891 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2892 EOP_TC_NC_ACTION_EN) :
2893 (EOP_TCL1_ACTION_EN |
2894 EOP_TC_ACTION_EN |
2895 EOP_TC_WB_ACTION_EN |
2896 EOP_TC_MD_ACTION_EN)) |
2897 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2898 EVENT_INDEX(5)));
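	/* DATA_SEL: 1 = send the low 32 bits of seq, 2 = send all 64 bits;
	 * INT_SEL: 2 = raise the interrupt once the data write confirms
	 */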
2899 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2900
2901 /*
2902 * the address should be qword-aligned for a 64-bit write and
2903 * dword-aligned when only the low 32 bits are sent (data high discarded)
2904 */
2905 if (write64bit)
2906 BUG_ON(addr & 0x7);
2907 else
2908 BUG_ON(addr & 0x3);
2909 amdgpu_ring_write(ring, lower_32_bits(addr));
2910 amdgpu_ring_write(ring, upper_32_bits(addr));
2911 amdgpu_ring_write(ring, lower_32_bits(seq));
2912 amdgpu_ring_write(ring, upper_32_bits(seq));
2913 amdgpu_ring_write(ring, 0);
2914 }
2915
2916 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2917 {
2918 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2919 uint32_t seq = ring->fence_drv.sync_seq;
2920 uint64_t addr = ring->fence_drv.gpu_addr;
2921
2922 gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2923 lower_32_bits(addr), upper_32_bits(addr),
2924 seq, 0xffffffff, 4);
2925 }
2926
2927 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2928 unsigned vmid, uint64_t pd_addr)
2929 {
2930 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2931 }
2932
2933 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2934 {
2935 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2936 }
2937
2938 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2939 {
2940 u64 wptr;
2941
2942 /* XXX check if swapping is necessary on BE */
2943 if (ring->use_doorbell)
2944 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2945 else
2946 BUG();
2947 return wptr;
2948 }
2949
2950 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2951 {
2952 struct amdgpu_device *adev = ring->adev;
2953
2954 /* XXX check if swapping is necessary on BE */
2955 if (ring->use_doorbell) {
2956 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2957 WDOORBELL64(ring->doorbell_index, ring->wptr);
2958 } else {
2959 BUG(); /* only DOORBELL method supported on gfx9 now */
2960 }
2961 }
2962
2963 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2964 u64 seq, unsigned int flags)
2965 {
2966 struct amdgpu_device *adev = ring->adev;
2967
2968 /* we only allocate 32bit for each seq wb address */
2969 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2970
2971 /* write fence seq to the "addr" */
2972 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2973 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2974 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2975 amdgpu_ring_write(ring, lower_32_bits(addr));
2976 amdgpu_ring_write(ring, upper_32_bits(addr));
2977 amdgpu_ring_write(ring, lower_32_bits(seq));
2978
2979 if (flags & AMDGPU_FENCE_FLAG_INT) {
2980 /* set register to trigger INT */
2981 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2982 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2983 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2984 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2985 amdgpu_ring_write(ring, 0);
2986 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2987 }
2988 }
2989
2990 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2991 uint32_t reg_val_offs)
2992 {
2993 struct amdgpu_device *adev = ring->adev;
2994
2995 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
2996
2997 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2998 amdgpu_ring_write(ring, 0 | /* src: register*/
2999 (5 << 8) | /* dst: memory */
3000 (1 << 20)); /* write confirm */
3001 amdgpu_ring_write(ring, reg);
3002 amdgpu_ring_write(ring, 0);
3003 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3004 reg_val_offs * 4));
3005 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3006 reg_val_offs * 4));
3007 }
3008
3009 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
3010 uint32_t val)
3011 {
3012 uint32_t cmd = 0;
3013
3014 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
3015
3016 switch (ring->funcs->type) {
3017 case AMDGPU_RING_TYPE_GFX:
3018 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
3019 break;
3020 case AMDGPU_RING_TYPE_KIQ:
3021 cmd = (1 << 16); /* no inc addr */
3022 break;
3023 default:
3024 cmd = WR_CONFIRM;
3025 break;
3026 }
3027 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3028 amdgpu_ring_write(ring, cmd);
3029 amdgpu_ring_write(ring, reg);
3030 amdgpu_ring_write(ring, 0);
3031 amdgpu_ring_write(ring, val);
3032 }
3033
3034 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3035 uint32_t val, uint32_t mask)
3036 {
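/* Poll the register (mem_space 0) until (reg & mask) == val; the trailing
 * 0x20 is the poll interval dword of the WAIT_REG_MEM packet.
 */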
3037 gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3038 }
3039
3040 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3041 uint32_t reg0, uint32_t reg1,
3042 uint32_t ref, uint32_t mask)
3043 {
3044 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
3045 ref, mask);
3046 }
3047
3048 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3049 unsigned vmid)
3050 {
3051 struct amdgpu_device *adev = ring->adev;
3052 uint32_t value = 0;
3053
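/* Build an SQ_CMD that targets only waves of the hung VMID (CHECK_VMID=1,
 * VM_ID=vmid), issued under RLC safe mode to soft-recover the queue.
 */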
3054 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3055 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3056 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3057 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3058 amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3059 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3060 amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3061 }
3062
3063 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3064 struct amdgpu_device *adev, int me, int pipe,
3065 enum amdgpu_interrupt_state state, int xcc_id)
3066 {
3067 u32 mec_int_cntl, mec_int_cntl_reg;
3068
3069 /*
3070 * amdgpu controls only the first MEC. That's why this function only
3071 * handles the setting of interrupts for this specific MEC. All other
3072 * pipes' interrupts are set by amdkfd.
3073 */
3074
3075 if (me == 1) {
3076 switch (pipe) {
3077 case 0:
3078 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3079 break;
3080 case 1:
3081 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3082 break;
3083 case 2:
3084 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3085 break;
3086 case 3:
3087 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3088 break;
3089 default:
3090 DRM_DEBUG("invalid pipe %d\n", pipe);
3091 return;
3092 }
3093 } else {
3094 DRM_DEBUG("invalid me %d\n", me);
3095 return;
3096 }
3097
3098 switch (state) {
3099 case AMDGPU_IRQ_STATE_DISABLE:
3100 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3101 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3102 TIME_STAMP_INT_ENABLE, 0);
3103 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3104 break;
3105 case AMDGPU_IRQ_STATE_ENABLE:
3106 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3107 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3108 TIME_STAMP_INT_ENABLE, 1);
3109 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3110 break;
3111 default:
3112 break;
3113 }
3114 }
3115
3116 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3117 int xcc_id, int me, int pipe)
3118 {
3119 /*
3120 * amdgpu controls only the first MEC. That's why this function only
3121 * handles the setting of interrupts for this specific MEC. All other
3122 * pipes' interrupts are set by amdkfd.
3123 */
3124 if (me != 1)
3125 return 0;
3126
3127 switch (pipe) {
3128 case 0:
3129 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3130 case 1:
3131 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3132 case 2:
3133 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3134 case 3:
3135 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3136 default:
3137 return 0;
3138 }
3139 }
3140
3141 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3142 struct amdgpu_irq_src *source,
3143 unsigned type,
3144 enum amdgpu_interrupt_state state)
3145 {
3146 u32 mec_int_cntl_reg, mec_int_cntl;
3147 int i, j, k, num_xcc;
3148
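/* Toggle PRIV_REG_INT_ENABLE on CP_INT_CNTL_RING0 and on every MEC1 pipe
 * interrupt control register, across all XCCs.
 */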
3149 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3150 switch (state) {
3151 case AMDGPU_IRQ_STATE_DISABLE:
3152 case AMDGPU_IRQ_STATE_ENABLE:
3153 for (i = 0; i < num_xcc; i++) {
3154 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3155 PRIV_REG_INT_ENABLE,
3156 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3157 for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3158 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3159 /* MECs start at 1 */
3160 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3161
3162 if (mec_int_cntl_reg) {
3163 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3164 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3165 PRIV_REG_INT_ENABLE,
3166 state == AMDGPU_IRQ_STATE_ENABLE ?
3167 1 : 0);
3168 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3169 }
3170 }
3171 }
3172 }
3173 break;
3174 default:
3175 break;
3176 }
3177
3178 return 0;
3179 }
3180
3181 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3182 struct amdgpu_irq_src *source,
3183 unsigned type,
3184 enum amdgpu_interrupt_state state)
3185 {
3186 u32 mec_int_cntl_reg, mec_int_cntl;
3187 int i, j, k, num_xcc;
3188
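/* Same pattern as gfx_v9_4_3_set_priv_reg_fault_state(), but toggling
 * OPCODE_ERROR_INT_ENABLE instead.
 */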
3189 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3190 switch (state) {
3191 case AMDGPU_IRQ_STATE_DISABLE:
3192 case AMDGPU_IRQ_STATE_ENABLE:
3193 for (i = 0; i < num_xcc; i++) {
3194 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3195 OPCODE_ERROR_INT_ENABLE,
3196 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3197 for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3198 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3199 /* MECs start at 1 */
3200 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3201
3202 if (mec_int_cntl_reg) {
3203 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3204 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3205 OPCODE_ERROR_INT_ENABLE,
3206 state == AMDGPU_IRQ_STATE_ENABLE ?
3207 1 : 0);
3208 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3209 }
3210 }
3211 }
3212 }
3213 break;
3214 default:
3215 break;
3216 }
3217
3218 return 0;
3219 }
3220
3221 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3222 struct amdgpu_irq_src *source,
3223 unsigned type,
3224 enum amdgpu_interrupt_state state)
3225 {
3226 int i, num_xcc;
3227
3228 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3229 switch (state) {
3230 case AMDGPU_IRQ_STATE_DISABLE:
3231 case AMDGPU_IRQ_STATE_ENABLE:
3232 for (i = 0; i < num_xcc; i++)
3233 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3234 PRIV_INSTR_INT_ENABLE,
3235 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3236 break;
3237 default:
3238 break;
3239 }
3240
3241 return 0;
3242 }
3243
3244 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3245 struct amdgpu_irq_src *src,
3246 unsigned type,
3247 enum amdgpu_interrupt_state state)
3248 {
3249 int i, num_xcc;
3250
3251 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3252 for (i = 0; i < num_xcc; i++) {
3253 switch (type) {
3254 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3255 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3256 adev, 1, 0, state, i);
3257 break;
3258 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3259 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3260 adev, 1, 1, state, i);
3261 break;
3262 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3263 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3264 adev, 1, 2, state, i);
3265 break;
3266 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3267 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3268 adev, 1, 3, state, i);
3269 break;
3270 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3271 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3272 adev, 2, 0, state, i);
3273 break;
3274 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3275 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3276 adev, 2, 1, state, i);
3277 break;
3278 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3279 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3280 adev, 2, 2, state, i);
3281 break;
3282 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3283 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3284 adev, 2, 3, state, i);
3285 break;
3286 default:
3287 break;
3288 }
3289 }
3290
3291 return 0;
3292 }
3293
3294 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
3295 struct amdgpu_irq_src *source,
3296 struct amdgpu_iv_entry *entry)
3297 {
3298 int i, xcc_id;
3299 u8 me_id, pipe_id, queue_id;
3300 struct amdgpu_ring *ring;
3301
3302 DRM_DEBUG("IH: CP EOP\n");
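/* decode the source queue from the IV ring_id: me [3:2], pipe [1:0], queue [6:4] */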
3303 me_id = (entry->ring_id & 0x0c) >> 2;
3304 pipe_id = (entry->ring_id & 0x03) >> 0;
3305 queue_id = (entry->ring_id & 0x70) >> 4;
3306
3307 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3308
3309 if (xcc_id == -EINVAL)
3310 return -EINVAL;
3311
3312 switch (me_id) {
3313 case 0:
3314 case 1:
3315 case 2:
3316 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3317 ring = &adev->gfx.compute_ring
3318 [i +
3319 xcc_id * adev->gfx.num_compute_rings];
3320 /* Per-queue interrupt is supported for MEC starting from VI.
3321 * The interrupt can only be enabled/disabled per pipe instead of per queue.
3322 */
3323
3324 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3325 amdgpu_fence_process(ring);
3326 }
3327 break;
3328 }
3329 return 0;
3330 }
3331
3332 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3333 struct amdgpu_iv_entry *entry)
3334 {
3335 u8 me_id, pipe_id, queue_id;
3336 struct amdgpu_ring *ring;
3337 int i, xcc_id;
3338
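/* same IV ring_id decoding as in gfx_v9_4_3_eop_irq() */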
3339 me_id = (entry->ring_id & 0x0c) >> 2;
3340 pipe_id = (entry->ring_id & 0x03) >> 0;
3341 queue_id = (entry->ring_id & 0x70) >> 4;
3342
3343 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3344
3345 if (xcc_id == -EINVAL)
3346 return;
3347
3348 switch (me_id) {
3349 case 0:
3350 case 1:
3351 case 2:
3352 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3353 ring = &adev->gfx.compute_ring
3354 [i +
3355 xcc_id * adev->gfx.num_compute_rings];
3356 if (ring->me == me_id && ring->pipe == pipe_id &&
3357 ring->queue == queue_id)
3358 drm_sched_fault(&ring->sched);
3359 }
3360 break;
3361 }
3362 }
3363
3364 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3365 struct amdgpu_irq_src *source,
3366 struct amdgpu_iv_entry *entry)
3367 {
3368 DRM_ERROR("Illegal register access in command stream\n");
3369 gfx_v9_4_3_fault(adev, entry);
3370 return 0;
3371 }
3372
3373 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
3374 struct amdgpu_irq_src *source,
3375 struct amdgpu_iv_entry *entry)
3376 {
3377 DRM_ERROR("Illegal opcode in command stream\n");
3378 gfx_v9_4_3_fault(adev, entry);
3379 return 0;
3380 }
3381
3382 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3383 struct amdgpu_irq_src *source,
3384 struct amdgpu_iv_entry *entry)
3385 {
3386 DRM_ERROR("Illegal instruction in command stream\n");
3387 gfx_v9_4_3_fault(adev, entry);
3388 return 0;
3389 }
3390
3391 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3392 {
3393 const unsigned int cp_coher_cntl =
3394 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3395 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3396 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3397 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3398 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3399
3400 /* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
3401 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3402 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3403 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
3404 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
3405 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3406 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
3407 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3408 }
3409
3410 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3411 uint32_t pipe, bool enable)
3412 {
3413 struct amdgpu_device *adev = ring->adev;
3414 uint32_t val;
3415 uint32_t wcl_cs_reg;
3416
3417 /* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
3418 val = enable ? 0x1 : 0x7f;
3419
3420 switch (pipe) {
3421 case 0:
3422 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3423 break;
3424 case 1:
3425 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3426 break;
3427 case 2:
3428 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3429 break;
3430 case 3:
3431 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3432 break;
3433 default:
3434 DRM_DEBUG("invalid pipe %d\n", pipe);
3435 return;
3436 }
3437
3438 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3439
3440 }
3441 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3442 {
3443 struct amdgpu_device *adev = ring->adev;
3444 uint32_t val;
3445 int i;
3446
3447 /* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
3448 * the number of gfx waves. Setting it to 0x1f (5 bits set) makes sure gfx
3449 * only gets around 25% of gpu resources.
3450 */
3451 val = enable ? 0x1f : 0x07ffffff;
3452 amdgpu_ring_emit_wreg(ring,
3453 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3454 val);
3455
3456 /* Restrict waves for normal/low priority compute queues as well
3457 * to get best QoS for high priority compute jobs.
3458 *
3459 * amdgpu controls only the 1st ME (CS pipes 0-3).
3460 */
3461 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3462 if (i != ring->pipe)
3463 gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3464
3465 }
3466 }
3467
3468 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
3469 uint32_t pipe, uint32_t queue,
3470 uint32_t xcc_id)
3471 {
3472 int i, r;
3473 /* make sure dequeue is complete */
3474 gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
3475 mutex_lock(&adev->srbm_mutex);
3476 soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
3477 for (i = 0; i < adev->usec_timeout; i++) {
3478 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
3479 break;
3480 udelay(1);
3481 }
3482 if (i >= adev->usec_timeout)
3483 r = -ETIMEDOUT;
3484 else
3485 r = 0;
3486 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
3487 mutex_unlock(&adev->srbm_mutex);
3488 gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
3489
3490 return r;
3491
3492 }
3493
3494 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
3495 {
3496 if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
3497 return true;
3498 else
3499 dev_warn_once(adev->dev, "Please use the latest MEC version to check whether pipe reset is supported\n");
3500
3501 return false;
3502 }
3503
3504 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
3505 {
3506 struct amdgpu_device *adev = ring->adev;
3507 uint32_t reset_pipe, clean_pipe;
3508 int r;
3509
3510 if (!gfx_v9_4_3_pipe_reset_support(adev))
3511 return -EINVAL;
3512
3513 gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
3514 mutex_lock(&adev->srbm_mutex);
3515
3516 reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
3517 clean_pipe = reset_pipe;
3518
3519 if (ring->me == 1) {
3520 switch (ring->pipe) {
3521 case 0:
3522 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3523 MEC_ME1_PIPE0_RESET, 1);
3524 break;
3525 case 1:
3526 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3527 MEC_ME1_PIPE1_RESET, 1);
3528 break;
3529 case 2:
3530 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3531 MEC_ME1_PIPE2_RESET, 1);
3532 break;
3533 case 3:
3534 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3535 MEC_ME1_PIPE3_RESET, 1);
3536 break;
3537 default:
3538 break;
3539 }
3540 } else {
3541 if (ring->pipe)
3542 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3543 MEC_ME2_PIPE1_RESET, 1);
3544 else
3545 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3546 MEC_ME2_PIPE0_RESET, 1);
3547 }
3548
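/* Pulse the reset: write CP_MEC_CNTL with the pipe's reset bit set, then
 * restore the saved value to release the pipe.
 */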
3549 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
3550 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
3551 mutex_unlock(&adev->srbm_mutex);
3552 gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
3553
3554 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3555 return r;
3556 }
3557
3558 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3559 unsigned int vmid,
3560 struct amdgpu_fence *timedout_fence)
3561 {
3562 struct amdgpu_device *adev = ring->adev;
3563 struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3564 struct amdgpu_ring *kiq_ring = &kiq->ring;
3565 int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE;
3566 unsigned long flags;
3567 int r;
3568
3569 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3570 return -EINVAL;
3571
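/* Recovery flow: unmap the queue via the KIQ and wait for the HQD to go
 * idle; if that fails, fall back to a per-pipe reset, then re-init the
 * queue and remap it through the KIQ.
 */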
3572 amdgpu_ring_reset_helper_begin(ring, timedout_fence);
3573
3574 spin_lock_irqsave(&kiq->ring_lock, flags);
3575
3576 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3577 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3578 return -ENOMEM;
3579 }
3580
3581 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3582 0, 0);
3583 amdgpu_ring_commit(kiq_ring);
3584
3585 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3586
3587 r = amdgpu_ring_test_ring(kiq_ring);
3588 if (r) {
3589 dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
3590 ring->name);
3591 goto pipe_reset;
3592 }
3593
3594 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3595 if (r)
3596 dev_err(adev->dev, "failed to wait for hqd deactivation, will try pipe reset\n");
3597
3598 pipe_reset:
3599 if (r) {
3600 if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
3601 return -EOPNOTSUPP;
3602 r = gfx_v9_4_3_reset_hw_pipe(ring);
3603 reset_mode = AMDGPU_RESET_TYPE_PER_PIPE;
3604 dev_info(adev->dev, "ring: %s pipe reset: %s\n", ring->name,
3605 r ? "failed" : "succeeded");
3606 if (r)
3607 return r;
3608 }
3609
3610 r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
3611 if (r) {
3612 dev_err(adev->dev, "failed to init kcq\n");
3613 return r;
3614 }
3615 spin_lock_irqsave(&kiq->ring_lock, flags);
3616 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
3617 if (r) {
3618 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3619 return -ENOMEM;
3620 }
3621 kiq->pmf->kiq_map_queues(kiq_ring, ring);
3622 amdgpu_ring_commit(kiq_ring);
3623 r = amdgpu_ring_test_ring(kiq_ring);
3624 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3625 if (r) {
3626 if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
3627 goto pipe_reset;
3628
3629 dev_err(adev->dev, "failed to remap queue\n");
3630 return r;
3631 }
3632
3633 if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
3634 r = amdgpu_ring_test_ring(ring);
3635 if (r)
3636 goto pipe_reset;
3637 }
3638
3639
3640 return amdgpu_ring_reset_helper_end(ring, timedout_fence);
3641 }
3642
3643 enum amdgpu_gfx_cp_ras_mem_id {
3644 AMDGPU_GFX_CP_MEM1 = 1,
3645 AMDGPU_GFX_CP_MEM2,
3646 AMDGPU_GFX_CP_MEM3,
3647 AMDGPU_GFX_CP_MEM4,
3648 AMDGPU_GFX_CP_MEM5,
3649 };
3650
3651 enum amdgpu_gfx_gcea_ras_mem_id {
3652 AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3653 AMDGPU_GFX_GCEA_IORD_CMDMEM,
3654 AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3655 AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3656 AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3657 AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3658 AMDGPU_GFX_GCEA_MAM_DMEM0,
3659 AMDGPU_GFX_GCEA_MAM_DMEM1,
3660 AMDGPU_GFX_GCEA_MAM_DMEM2,
3661 AMDGPU_GFX_GCEA_MAM_DMEM3,
3662 AMDGPU_GFX_GCEA_MAM_AMEM0,
3663 AMDGPU_GFX_GCEA_MAM_AMEM1,
3664 AMDGPU_GFX_GCEA_MAM_AMEM2,
3665 AMDGPU_GFX_GCEA_MAM_AMEM3,
3666 AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3667 AMDGPU_GFX_GCEA_WRET_TAGMEM,
3668 AMDGPU_GFX_GCEA_RRET_TAGMEM,
3669 AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3670 AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3671 AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3672 };
3673
3674 enum amdgpu_gfx_gc_cane_ras_mem_id {
3675 AMDGPU_GFX_GC_CANE_MEM0 = 0,
3676 };
3677
3678 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3679 AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3680 };
3681
3682 enum amdgpu_gfx_gds_ras_mem_id {
3683 AMDGPU_GFX_GDS_MEM0 = 0,
3684 };
3685
3686 enum amdgpu_gfx_lds_ras_mem_id {
3687 AMDGPU_GFX_LDS_BANK0 = 0,
3688 AMDGPU_GFX_LDS_BANK1,
3689 AMDGPU_GFX_LDS_BANK2,
3690 AMDGPU_GFX_LDS_BANK3,
3691 AMDGPU_GFX_LDS_BANK4,
3692 AMDGPU_GFX_LDS_BANK5,
3693 AMDGPU_GFX_LDS_BANK6,
3694 AMDGPU_GFX_LDS_BANK7,
3695 AMDGPU_GFX_LDS_BANK8,
3696 AMDGPU_GFX_LDS_BANK9,
3697 AMDGPU_GFX_LDS_BANK10,
3698 AMDGPU_GFX_LDS_BANK11,
3699 AMDGPU_GFX_LDS_BANK12,
3700 AMDGPU_GFX_LDS_BANK13,
3701 AMDGPU_GFX_LDS_BANK14,
3702 AMDGPU_GFX_LDS_BANK15,
3703 AMDGPU_GFX_LDS_BANK16,
3704 AMDGPU_GFX_LDS_BANK17,
3705 AMDGPU_GFX_LDS_BANK18,
3706 AMDGPU_GFX_LDS_BANK19,
3707 AMDGPU_GFX_LDS_BANK20,
3708 AMDGPU_GFX_LDS_BANK21,
3709 AMDGPU_GFX_LDS_BANK22,
3710 AMDGPU_GFX_LDS_BANK23,
3711 AMDGPU_GFX_LDS_BANK24,
3712 AMDGPU_GFX_LDS_BANK25,
3713 AMDGPU_GFX_LDS_BANK26,
3714 AMDGPU_GFX_LDS_BANK27,
3715 AMDGPU_GFX_LDS_BANK28,
3716 AMDGPU_GFX_LDS_BANK29,
3717 AMDGPU_GFX_LDS_BANK30,
3718 AMDGPU_GFX_LDS_BANK31,
3719 AMDGPU_GFX_LDS_SP_BUFFER_A,
3720 AMDGPU_GFX_LDS_SP_BUFFER_B,
3721 };
3722
3723 enum amdgpu_gfx_rlc_ras_mem_id {
3724 AMDGPU_GFX_RLC_GPMF32 = 1,
3725 AMDGPU_GFX_RLC_RLCVF32,
3726 AMDGPU_GFX_RLC_SCRATCH,
3727 AMDGPU_GFX_RLC_SRM_ARAM,
3728 AMDGPU_GFX_RLC_SRM_DRAM,
3729 AMDGPU_GFX_RLC_TCTAG,
3730 AMDGPU_GFX_RLC_SPM_SE,
3731 AMDGPU_GFX_RLC_SPM_GRBMT,
3732 };
3733
3734 enum amdgpu_gfx_sp_ras_mem_id {
3735 AMDGPU_GFX_SP_SIMDID0 = 0,
3736 };
3737
3738 enum amdgpu_gfx_spi_ras_mem_id {
3739 AMDGPU_GFX_SPI_MEM0 = 0,
3740 AMDGPU_GFX_SPI_MEM1,
3741 AMDGPU_GFX_SPI_MEM2,
3742 AMDGPU_GFX_SPI_MEM3,
3743 };
3744
3745 enum amdgpu_gfx_sqc_ras_mem_id {
3746 AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3747 AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3748 AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3749 AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3750 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3751 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3752 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3753 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3754 AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3755 AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3756 AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3757 AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3758 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3759 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3760 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3761 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3762 AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3763 AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3764 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3765 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3766 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3767 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3768 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3769 };
3770
3771 enum amdgpu_gfx_sq_ras_mem_id {
3772 AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3773 AMDGPU_GFX_SQ_SGPR_MEM1,
3774 AMDGPU_GFX_SQ_SGPR_MEM2,
3775 AMDGPU_GFX_SQ_SGPR_MEM3,
3776 };
3777
3778 enum amdgpu_gfx_ta_ras_mem_id {
3779 AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3780 AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3781 AMDGPU_GFX_TA_FS_CFIFO_RAM,
3782 AMDGPU_GFX_TA_FSX_LFIFO,
3783 AMDGPU_GFX_TA_FS_DFIFO_RAM,
3784 };
3785
3786 enum amdgpu_gfx_tcc_ras_mem_id {
3787 AMDGPU_GFX_TCC_MEM1 = 1,
3788 };
3789
3790 enum amdgpu_gfx_tca_ras_mem_id {
3791 AMDGPU_GFX_TCA_MEM1 = 1,
3792 };
3793
3794 enum amdgpu_gfx_tci_ras_mem_id {
3795 AMDGPU_GFX_TCIW_MEM = 1,
3796 };
3797
3798 enum amdgpu_gfx_tcp_ras_mem_id {
3799 AMDGPU_GFX_TCP_LFIFO0 = 1,
3800 AMDGPU_GFX_TCP_SET0BANK0_RAM,
3801 AMDGPU_GFX_TCP_SET0BANK1_RAM,
3802 AMDGPU_GFX_TCP_SET0BANK2_RAM,
3803 AMDGPU_GFX_TCP_SET0BANK3_RAM,
3804 AMDGPU_GFX_TCP_SET1BANK0_RAM,
3805 AMDGPU_GFX_TCP_SET1BANK1_RAM,
3806 AMDGPU_GFX_TCP_SET1BANK2_RAM,
3807 AMDGPU_GFX_TCP_SET1BANK3_RAM,
3808 AMDGPU_GFX_TCP_SET2BANK0_RAM,
3809 AMDGPU_GFX_TCP_SET2BANK1_RAM,
3810 AMDGPU_GFX_TCP_SET2BANK2_RAM,
3811 AMDGPU_GFX_TCP_SET2BANK3_RAM,
3812 AMDGPU_GFX_TCP_SET3BANK0_RAM,
3813 AMDGPU_GFX_TCP_SET3BANK1_RAM,
3814 AMDGPU_GFX_TCP_SET3BANK2_RAM,
3815 AMDGPU_GFX_TCP_SET3BANK3_RAM,
3816 AMDGPU_GFX_TCP_VM_FIFO,
3817 AMDGPU_GFX_TCP_DB_TAGRAM0,
3818 AMDGPU_GFX_TCP_DB_TAGRAM1,
3819 AMDGPU_GFX_TCP_DB_TAGRAM2,
3820 AMDGPU_GFX_TCP_DB_TAGRAM3,
3821 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3822 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3823 AMDGPU_GFX_TCP_CMD_FIFO,
3824 };
3825
3826 enum amdgpu_gfx_td_ras_mem_id {
3827 AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3828 AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3829 AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3830 };
3831
3832 enum amdgpu_gfx_tcx_ras_mem_id {
3833 AMDGPU_GFX_TCX_FIFOD0 = 0,
3834 AMDGPU_GFX_TCX_FIFOD1,
3835 AMDGPU_GFX_TCX_FIFOD2,
3836 AMDGPU_GFX_TCX_FIFOD3,
3837 AMDGPU_GFX_TCX_FIFOD4,
3838 AMDGPU_GFX_TCX_FIFOD5,
3839 AMDGPU_GFX_TCX_FIFOD6,
3840 AMDGPU_GFX_TCX_FIFOD7,
3841 AMDGPU_GFX_TCX_FIFOB0,
3842 AMDGPU_GFX_TCX_FIFOB1,
3843 AMDGPU_GFX_TCX_FIFOB2,
3844 AMDGPU_GFX_TCX_FIFOB3,
3845 AMDGPU_GFX_TCX_FIFOB4,
3846 AMDGPU_GFX_TCX_FIFOB5,
3847 AMDGPU_GFX_TCX_FIFOB6,
3848 AMDGPU_GFX_TCX_FIFOB7,
3849 AMDGPU_GFX_TCX_FIFOA0,
3850 AMDGPU_GFX_TCX_FIFOA1,
3851 AMDGPU_GFX_TCX_FIFOA2,
3852 AMDGPU_GFX_TCX_FIFOA3,
3853 AMDGPU_GFX_TCX_FIFOA4,
3854 AMDGPU_GFX_TCX_FIFOA5,
3855 AMDGPU_GFX_TCX_FIFOA6,
3856 AMDGPU_GFX_TCX_FIFOA7,
3857 AMDGPU_GFX_TCX_CFIFO0,
3858 AMDGPU_GFX_TCX_CFIFO1,
3859 AMDGPU_GFX_TCX_CFIFO2,
3860 AMDGPU_GFX_TCX_CFIFO3,
3861 AMDGPU_GFX_TCX_CFIFO4,
3862 AMDGPU_GFX_TCX_CFIFO5,
3863 AMDGPU_GFX_TCX_CFIFO6,
3864 AMDGPU_GFX_TCX_CFIFO7,
3865 AMDGPU_GFX_TCX_FIFO_ACKB0,
3866 AMDGPU_GFX_TCX_FIFO_ACKB1,
3867 AMDGPU_GFX_TCX_FIFO_ACKB2,
3868 AMDGPU_GFX_TCX_FIFO_ACKB3,
3869 AMDGPU_GFX_TCX_FIFO_ACKB4,
3870 AMDGPU_GFX_TCX_FIFO_ACKB5,
3871 AMDGPU_GFX_TCX_FIFO_ACKB6,
3872 AMDGPU_GFX_TCX_FIFO_ACKB7,
3873 AMDGPU_GFX_TCX_FIFO_ACKD0,
3874 AMDGPU_GFX_TCX_FIFO_ACKD1,
3875 AMDGPU_GFX_TCX_FIFO_ACKD2,
3876 AMDGPU_GFX_TCX_FIFO_ACKD3,
3877 AMDGPU_GFX_TCX_FIFO_ACKD4,
3878 AMDGPU_GFX_TCX_FIFO_ACKD5,
3879 AMDGPU_GFX_TCX_FIFO_ACKD6,
3880 AMDGPU_GFX_TCX_FIFO_ACKD7,
3881 AMDGPU_GFX_TCX_DST_FIFOA0,
3882 AMDGPU_GFX_TCX_DST_FIFOA1,
3883 AMDGPU_GFX_TCX_DST_FIFOA2,
3884 AMDGPU_GFX_TCX_DST_FIFOA3,
3885 AMDGPU_GFX_TCX_DST_FIFOA4,
3886 AMDGPU_GFX_TCX_DST_FIFOA5,
3887 AMDGPU_GFX_TCX_DST_FIFOA6,
3888 AMDGPU_GFX_TCX_DST_FIFOA7,
3889 AMDGPU_GFX_TCX_DST_FIFOB0,
3890 AMDGPU_GFX_TCX_DST_FIFOB1,
3891 AMDGPU_GFX_TCX_DST_FIFOB2,
3892 AMDGPU_GFX_TCX_DST_FIFOB3,
3893 AMDGPU_GFX_TCX_DST_FIFOB4,
3894 AMDGPU_GFX_TCX_DST_FIFOB5,
3895 AMDGPU_GFX_TCX_DST_FIFOB6,
3896 AMDGPU_GFX_TCX_DST_FIFOB7,
3897 AMDGPU_GFX_TCX_DST_FIFOD0,
3898 AMDGPU_GFX_TCX_DST_FIFOD1,
3899 AMDGPU_GFX_TCX_DST_FIFOD2,
3900 AMDGPU_GFX_TCX_DST_FIFOD3,
3901 AMDGPU_GFX_TCX_DST_FIFOD4,
3902 AMDGPU_GFX_TCX_DST_FIFOD5,
3903 AMDGPU_GFX_TCX_DST_FIFOD6,
3904 AMDGPU_GFX_TCX_DST_FIFOD7,
3905 AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3906 AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3907 AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3908 AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3909 AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3910 AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3911 AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3912 AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3913 AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3914 AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3915 AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3916 AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3917 AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3918 AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3919 AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3920 AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3921 };
3922
3923 enum amdgpu_gfx_atc_l2_ras_mem_id {
3924 AMDGPU_GFX_ATC_L2_MEM0 = 0,
3925 };
3926
3927 enum amdgpu_gfx_utcl2_ras_mem_id {
3928 AMDGPU_GFX_UTCL2_MEM0 = 0,
3929 };
3930
3931 enum amdgpu_gfx_vml2_ras_mem_id {
3932 AMDGPU_GFX_VML2_MEM0 = 0,
3933 };
3934
3935 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3936 AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3937 };
3938
3939 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3940 {AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3941 {AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3942 {AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3943 {AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3944 {AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3945 };
3946
3947 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3948 {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3949 {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3950 {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3951 {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3952 {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3953 {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3954 {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3955 {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3956 {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3957 {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3958 {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3959 {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3960 {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3961 {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3962 {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3963 {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3964 {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3965 {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3966 {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3967 {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3968 };
3969
3970 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3971 {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3972 };
3973
3974 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3975 {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3976 };
3977
3978 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3979 {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3980 };
3981
3982 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3983 {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3984 {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3985 {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3986 {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3987 {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3988 {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3989 {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
3990 {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
3991 {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
3992 {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
3993 {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
3994 {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
3995 {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
3996 {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
3997 {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
3998 {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
3999 {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
4000 {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
4001 {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
4002 {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
4003 {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
4004 {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
4005 {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
4006 {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
4007 {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
4008 {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
4009 {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
4010 {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
4011 {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
4012 {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
4013 {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
4014 {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
4015 {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
4016 {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
4017 };
4018
4019 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
4020 {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
4021 {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
4022 {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
4023 {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
4024 {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
4025 {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
4026 {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
4027 {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
4028 };
4029
4030 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
4031 {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
4032 };
4033
4034 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
4035 {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
4036 {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
4037 {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
4038 {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
4039 };
4040
4041 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
4042 {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
4043 {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
4044 {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
4045 {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
4046 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
4047 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
4048 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
4049 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
4050 {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
4051 {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
4052 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
4053 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
4054 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
4055 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
4056 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
4057 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
4058 {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
4059 {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
4060 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
4061 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
4062 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
4063 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
4064 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
4065 };
4066
4067 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
4068 {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
4069 {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
4070 {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
4071 {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
4072 };
4073
4074 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
4075 {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
4076 {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
4077 {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
4078 {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
4079 {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
4080 };
4081
4082 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
4083 {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
4084 };
4085
4086 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
4087 {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
4088 };
4089
4090 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
4091 {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
4092 };
4093
4094 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
4095 {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
4096 {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
4097 {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
4098 {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
4099 {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
4100 {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
4101 {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
4102 {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
4103 {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
4104 {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
4105 {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
4106 {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
4107 {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
4108 {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
4109 {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
4110 {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
4111 {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
4112 {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
4113 {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
4114 {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
4115 {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
4116 {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
4117 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
4118 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
4119 {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
4120 };
4121
4122 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
4123 {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
4124 {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
4125 {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
4126 };
4127
4128 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
4129 {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
4130 {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
4131 {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
4132 {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
4133 {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
4134 {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
4135 {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
4136 {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
4137 {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
4138 {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
4139 {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
4140 {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
4141 {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
4142 {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
4143 {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
4144 {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
4145 {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
4146 {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
4147 {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
4148 {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
4149 {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
4150 {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
4151 {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
4152 {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
4153 {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
4154 {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
4155 {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
4156 {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
4157 {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
4158 {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
4159 {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
4160 {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
4161 {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
4162 {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
4163 {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
4164 {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
4165 {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
4166 {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
4167 {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
4168 {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
4169 {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
4170 {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
4171 {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
4172 {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
4173 {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
4174 {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
4175 {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
4176 {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
4177 {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
4178 {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4179 {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4180 {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4181 {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4182 {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4183 {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4184 {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4185 {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4186 {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4187 {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4188 {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4189 {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4190 {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4191 {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4192 {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4193 {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4194 {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4195 {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4196 {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4197 {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4198 {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4199 {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4200 {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4201 {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4202 {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4203 {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4204 {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4205 {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4206 {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4207 {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4208 {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4209 {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4210 {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4211 {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4212 {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4213 {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4214 {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4215 {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4216 {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4217 };
4218
4219 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4220 {AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
4221 };
4222
4223 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4224 {AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
4225 };
4226
4227 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4228 {AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
4229 };
4230
4231 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4232 {AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
4233 };
4234
4235 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4236 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4237 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4238 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4239 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4240 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4241 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4242 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4243 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4244 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4245 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4246 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4247 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4248 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
4249 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
4250 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
4251 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
4252 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
4253 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
4254 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
4255 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
4256 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
4257 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
4258 };
4259
4260 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
4261 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
4262 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4263 AMDGPU_GFX_RLC_MEM, 1},
4264 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
4265 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4266 AMDGPU_GFX_CP_MEM, 1},
4267 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
4268 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4269 AMDGPU_GFX_CP_MEM, 1},
4270 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
4271 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4272 AMDGPU_GFX_CP_MEM, 1},
4273 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
4274 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4275 AMDGPU_GFX_GDS_MEM, 1},
4276 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
4277 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4278 AMDGPU_GFX_GC_CANE_MEM, 1},
4279 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
4280 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4281 AMDGPU_GFX_SPI_MEM, 1},
4282 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
4283 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4284 AMDGPU_GFX_SP_MEM, 4},
4285 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
4286 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4287 AMDGPU_GFX_SP_MEM, 4},
4288 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
4289 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4290 AMDGPU_GFX_SQ_MEM, 4},
4291 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
4292 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4293 AMDGPU_GFX_SQC_MEM, 4},
4294 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
4295 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4296 AMDGPU_GFX_TCX_MEM, 1},
4297 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
4298 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4299 AMDGPU_GFX_TCC_MEM, 1},
4300 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
4301 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4302 AMDGPU_GFX_TA_MEM, 4},
4303 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
4304 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4305 AMDGPU_GFX_TCI_MEM, 1},
4306 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
4307 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4308 AMDGPU_GFX_TCP_MEM, 4},
4309 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
4310 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4311 AMDGPU_GFX_TD_MEM, 4},
4312 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
4313 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4314 AMDGPU_GFX_GCEA_MEM, 1},
4315 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
4316 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4317 AMDGPU_GFX_LDS_MEM, 4},
4318 };
4319
4320 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
4321 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
4322 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4323 AMDGPU_GFX_RLC_MEM, 1},
4324 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
4325 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4326 AMDGPU_GFX_CP_MEM, 1},
4327 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
4328 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4329 AMDGPU_GFX_CP_MEM, 1},
4330 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
4331 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4332 AMDGPU_GFX_CP_MEM, 1},
4333 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
4334 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4335 AMDGPU_GFX_GDS_MEM, 1},
4336 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
4337 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4338 AMDGPU_GFX_GC_CANE_MEM, 1},
4339 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
4340 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4341 AMDGPU_GFX_SPI_MEM, 1},
4342 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
4343 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4344 AMDGPU_GFX_SP_MEM, 4},
4345 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
4346 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4347 AMDGPU_GFX_SP_MEM, 4},
4348 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
4349 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4350 AMDGPU_GFX_SQ_MEM, 4},
4351 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
4352 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4353 AMDGPU_GFX_SQC_MEM, 4},
4354 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
4355 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4356 AMDGPU_GFX_TCX_MEM, 1},
4357 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
4358 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4359 AMDGPU_GFX_TCC_MEM, 1},
4360 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
4361 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4362 AMDGPU_GFX_TA_MEM, 4},
4363 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
4364 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4365 AMDGPU_GFX_TCI_MEM, 1},
4366 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
4367 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4368 AMDGPU_GFX_TCP_MEM, 4},
4369 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
4370 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4371 AMDGPU_GFX_TD_MEM, 4},
4372 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
4373 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
4374 AMDGPU_GFX_TCA_MEM, 1},
4375 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
4376 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4377 AMDGPU_GFX_GCEA_MEM, 1},
4378 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
4379 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4380 AMDGPU_GFX_LDS_MEM, 4},
4381 };
4382
4383 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
4384 void *ras_error_status, int xcc_id)
4385 {
4386 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
4387 unsigned long ce_count = 0, ue_count = 0;
4388 uint32_t i, j, k;
4389
4390 /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
4391 struct amdgpu_smuio_mcm_config_info mcm_info = {
4392 .socket_id = adev->smuio.funcs->get_socket_id(adev),
4393 .die_id = xcc_id & 0x01 ? 1 : 0,
4394 };
4395
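/* The first ARRAY_SIZE(gfx_v9_4_3_ce_reg_list) entries of the UE list line
 * up with the CE list, so this pass collects both counts; the UE-only tail
 * is handled by the second loop below.
 */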
4396 mutex_lock(&adev->grbm_idx_mutex);
4397
4398 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4399 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4400 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4401 /* no need to select if instance number is 1 */
4402 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4403 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4404 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4405
4406 amdgpu_ras_inst_query_ras_error_count(adev,
4407 &(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4408 1,
4409 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
4410 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
4411 GET_INST(GC, xcc_id),
4412 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
4413 &ce_count);
4414
4415 amdgpu_ras_inst_query_ras_error_count(adev,
4416 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4417 1,
4418 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4419 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4420 GET_INST(GC, xcc_id),
4421 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4422 &ue_count);
4423 }
4424 }
4425 }
4426
4427 /* handle extra register entries of UE */
4428 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4429 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4430 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4431 /* no need to select if instance number is 1 */
4432 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4433 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4434 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4435
4436 amdgpu_ras_inst_query_ras_error_count(adev,
4437 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4438 1,
4439 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4440 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4441 GET_INST(GC, xcc_id),
4442 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4443 &ue_count);
4444 }
4445 }
4446 }
4447
4448 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4449 xcc_id);
4450 mutex_unlock(&adev->grbm_idx_mutex);
4451
4452 /* the caller should make sure the initial values of
4453 * err_data->ue_count and err_data->ce_count are set
4454 */
4455 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4456 amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4457 }
4458
4459 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4460 void *ras_error_status, int xcc_id)
4461 {
4462 uint32_t i, j, k;
4463
4464 mutex_lock(&adev->grbm_idx_mutex);
4465
4466 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4467 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4468 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4469 /* no need to select if instance number is 1 */
4470 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4471 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4472 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4473
4474 amdgpu_ras_inst_reset_ras_error_count(adev,
4475 &(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4476 1,
4477 GET_INST(GC, xcc_id));
4478
4479 amdgpu_ras_inst_reset_ras_error_count(adev,
4480 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4481 1,
4482 GET_INST(GC, xcc_id));
4483 }
4484 }
4485 }
4486
4487 /* handle extra register entries of UE */
4488 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4489 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4490 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4491 /* no need to select if instance number is 1 */
4492 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4493 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4494 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4495
4496 amdgpu_ras_inst_reset_ras_error_count(adev,
4497 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4498 1,
4499 GET_INST(GC, xcc_id));
4500 }
4501 }
4502 }
4503
4504 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4505 xcc_id);
4506 mutex_unlock(&adev->grbm_idx_mutex);
4507 }
4508
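/*
 * Program the per-XCC SQ watchdog from the amdgpu_watchdog_timer module
 * parameters. An out-of-range period (valid range 1..0x23) is clamped
 * with a warning. Skipped on SR-IOV VFs, which do not program this
 * register.
 */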
static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
						  void *ras_error_status, int xcc_id)
{
	uint32_t i;
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);

	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
	    (amdgpu_watchdog_timer.period < 1 ||
	     amdgpu_watchdog_timer.period > 0x23)) {
		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
		amdgpu_watchdog_timer.period = 0x23;
	}
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
			     amdgpu_watchdog_timer.period);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

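/*
 * Device-level RAS callbacks: amdgpu_gfx_ras_error_func() applies the
 * per-instance helpers above to each GFX XCC in turn.
 */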
static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
				  gfx_v9_4_3_inst_query_ras_err_count);
}

static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
}

static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}

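/*
 * Emit @num_nop NOP dwords using as few packets as possible: one
 * PACKET3_NOP header whose count field covers the bulk, with the
 * dwords that follow consumed as its payload. Illustrative sizing
 * (values assumed for the sketch, not taken from this file):
 * num_nop = 5 emits PACKET3(PACKET3_NOP, 3) followed by 4 payload
 * dwords, i.e. 5 dwords total.
 */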
static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
	/* Header itself is a NOP packet */
	if (num_nop == 1) {
		amdgpu_ring_write(ring, ring->funcs->nop);
		return;
	}

	/* Max HW optimization up to 0x3ffe; any remaining NOPs are emitted one at a time */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

	/* Header is at index 0, followed by num_nop - 1 NOP packets */
	amdgpu_ring_insert_nop(ring, num_nop - 1);
}

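/*
 * Pretty-print the snapshot captured by gfx_v9_4_3_ip_dump() below:
 * core GC registers per XCC first, then the CP registers for every
 * mec/pipe/queue compute instance.
 */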
static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k;
	uint32_t xcc_id, xcc_offset, inst_offset;
	uint32_t num_xcc, reg, num_inst;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);

	if (!adev->gfx.ip_dump_core)
		return;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	drm_printf(p, "Number of Instances:%d\n", num_xcc);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count;
		drm_printf(p, "\nInstance id:%d\n", xcc_id);
		for (i = 0; i < reg_count; i++)
			drm_printf(p, "%-50s \t 0x%08x\n",
				   gc_reg_list_9_4_3[i].reg_name,
				   adev->gfx.ip_dump_core[xcc_offset + i]);
	}

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
		   num_xcc,
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count * num_inst;
		inst_offset = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
					drm_printf(p,
						   "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
						   xcc_id, i, j, k);
					for (reg = 0; reg < reg_count; reg++) {
						if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
						    regCP_MEC_ME1_HEADER_DUMP)
							drm_printf(p,
								   "%-50s \t 0x%08x\n",
								   "regCP_MEC_ME2_HEADER_DUMP",
								   adev->gfx.ip_dump_compute_queues
								   [xcc_offset + inst_offset + reg]);
						else
							drm_printf(p,
								   "%-50s \t 0x%08x\n",
								   gc_cp_reg_list_9_4_3[reg].reg_name,
								   adev->gfx.ip_dump_compute_queues
								   [xcc_offset + inst_offset + reg]);
					}
					inst_offset += reg_count;
				}
			}
		}
	}
}

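/*
 * Capture GC and per-queue CP register state into the ip_dump buffers.
 * Queue registers are read after selecting each mec/pipe/queue through
 * soc15_grbm_select() under srbm_mutex; ME0 belongs to GFX, so CP MEs
 * are numbered from 1.
 */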
static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k;
	uint32_t num_xcc, reg, num_inst;
	uint32_t xcc_id, xcc_offset, inst_offset;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);

	if (!adev->gfx.ip_dump_core)
		return;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count;
		for (i = 0; i < reg_count; i++)
			adev->gfx.ip_dump_core[xcc_offset + i] =
				RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
								   GET_INST(GC, xcc_id)));
	}

	/* dump compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	mutex_lock(&adev->srbm_mutex);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count * num_inst;
		inst_offset = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
					/* ME0 is for GFX so start from 1 for CP */
					soc15_grbm_select(adev, 1 + i, j, k, 0,
							  GET_INST(GC, xcc_id));

					for (reg = 0; reg < reg_count; reg++) {
						if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
						    regCP_MEC_ME1_HEADER_DUMP)
							adev->gfx.ip_dump_compute_queues
								[xcc_offset + inst_offset + reg] =
								RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
											regCP_MEC_ME2_HEADER_DUMP));
						else
							adev->gfx.ip_dump_compute_queues
								[xcc_offset + inst_offset + reg] =
								RREG32(SOC15_REG_ENTRY_OFFSET_INST(
									gc_cp_reg_list_9_4_3[reg],
									GET_INST(GC, xcc_id)));
					}
					inst_offset += reg_count;
				}
			}
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

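/*
 * Ask the CP to run the cleaner shader between jobs. The packet carries a
 * single reserved dword; the shader binary itself comes from
 * gfx_v9_4_3_cleaner_shader.h and is used by the isolation-enforcement
 * begin_use/end_use ring hooks below. What state the shader scrubs is
 * defined by that binary, not by this packet.
 */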
static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
	/* Emit the cleaner shader */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
}

static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
	.name = "gfx_v9_4_3",
	.early_init = gfx_v9_4_3_early_init,
	.late_init = gfx_v9_4_3_late_init,
	.sw_init = gfx_v9_4_3_sw_init,
	.sw_fini = gfx_v9_4_3_sw_fini,
	.hw_init = gfx_v9_4_3_hw_init,
	.hw_fini = gfx_v9_4_3_hw_fini,
	.suspend = gfx_v9_4_3_suspend,
	.resume = gfx_v9_4_3_resume,
	.is_idle = gfx_v9_4_3_is_idle,
	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
	.soft_reset = gfx_v9_4_3_soft_reset,
	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
	.dump_ip_state = gfx_v9_4_3_ip_dump,
	.print_ip_state = gfx_v9_4_3_ip_print,
};

static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
		7 + /* gfx_v9_4_3_emit_mem_sync */
		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
		15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
		2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
	.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
	.emit_fence = gfx_v9_4_3_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.test_ib = gfx_v9_4_3_ring_test_ib,
	.insert_nop = gfx_v9_4_3_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_4_3_ring_soft_recovery,
	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
	.reset = gfx_v9_4_3_reset_kcq,
	.emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
};

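/*
 * Wire up the ring function tables. Compute rings are laid out XCC-major:
 * ring j of XCC i lives at compute_ring[j + i * num_compute_rings], so,
 * illustratively (values assumed, not from this file), with 8 rings per
 * XCC, ring 2 of XCC 1 is compute_ring[10].
 */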
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;

		for (j = 0; j < adev->gfx.num_compute_rings; j++)
			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
				= &gfx_v9_4_3_ring_funcs_compute;
	}
}

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
	.set = gfx_v9_4_3_set_eop_interrupt_state,
	.process = gfx_v9_4_3_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_reg_fault_state,
	.process = gfx_v9_4_3_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
	.set = gfx_v9_4_3_set_bad_op_fault_state,
	.process = gfx_v9_4_3_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_inst_fault_state,
	.process = gfx_v9_4_3_priv_inst_irq,
};

static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;

	adev->gfx.bad_op_irq.num_types = 1;
	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
}

static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
}

static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
{
	/* 9.4.3 variants removed all the GDS internal memory;
	 * the kernel only supports GWS opcodes such as barrier
	 * and semaphore.
	 */

	/* init asic gds info */
	adev->gds.gds_size = 0;
	adev->gds.gds_compute_max_wave_id = 0;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

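/*
 * Write a user-requested CU disable mask into the INACTIVE_CUS field of
 * GC_USER_SHADER_ARRAY_CONFIG for the currently selected SE/SH; a zero
 * bitmap leaves the register untouched.
 */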
static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						   u32 bitmap, int xcc_id)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}

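/*
 * Build the active-CU bitmap for the currently selected SE/SH: OR the
 * harvested (CC) and user-disabled (GC_USER) INACTIVE_CUS fields, then
 * invert under a mask of max_cu_per_sh bits. Illustratively (values
 * assumed, not from this file), with max_cu_per_sh = 8 and inactive
 * bits 0b00000011, the result is 0b11111100.
 */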
static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

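/*
 * Fill @cu_info from the per-XCC CU bitmaps: total active CU count,
 * per-SE/SH active and always-on bitmaps, and the packed always-on CU
 * mask. If every SE in an XCC reports the same CU count, the CPC
 * harvesting relaunch/dispatch logic is disabled for that XCC via
 * CP_CPC_DEBUG.
 */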
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info)
{
	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
	unsigned disable_masks[4 * 4];
	bool is_symmetric_cus;

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
	 */
	if (adev->gfx.config.max_shader_engines *
	    adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		is_symmetric_cus = true;
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				mask = 1;
				ao_bitmap = 0;
				counter = 0;
				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v9_4_3_set_user_cu_inactive_bitmap(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);

				cu_info->bitmap[xcc_id][i][j] = bitmap;

				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
					if (bitmap & mask) {
						if (counter < adev->gfx.config.max_cu_per_sh)
							ao_bitmap |= mask;
						counter++;
					}
					mask <<= 1;
				}
				active_cu_number += counter;
				if (i < 2 && j < 2)
					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
			}
			if (i && is_symmetric_cus && prev_counter != counter)
				is_symmetric_cus = false;
			prev_counter = counter;
		}
		if (is_symmetric_cus) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
		}
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
	}
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 4,
	.rev = 3,
	.funcs = &gfx_v9_4_3_ip_funcs,
};

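/*
 * XCP-level suspend/resume callbacks, invoked with a mask of the XCC
 * instances that make up the partition: resume re-initializes per-XCC
 * constants, then brings up RLC (skipped under SR-IOV) and CP on each
 * selected instance; suspend tears each instance down via
 * gfx_v9_4_3_xcc_fini().
 */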
static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp_mask;
	int i, r;

	/* TODO : Initialize golden regs */
	/* gfx_v9_4_3_init_golden_registers(adev); */

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask)
		gfx_v9_4_3_xcc_constants_init(adev, i);

	if (!amdgpu_sriov_vf(adev)) {
		tmp_mask = inst_mask;
		for_each_inst(i, tmp_mask) {
			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
			if (r)
				return r;
		}
	}

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for_each_inst(i, inst_mask)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
	.suspend = &gfx_v9_4_3_xcp_suspend,
	.resume = &gfx_v9_4_3_xcp_resume
};

struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};

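/*
 * Common RAS late init plus ACA binding: the GFX block is registered
 * with the RAS core first, then bound to its ACA error descriptor
 * (gfx_v9_4_3_aca_info). A failed bind unwinds the late init so the
 * block is not left half-registered.
 */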
static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
				&gfx_v9_4_3_aca_info,
				NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
	.ras_block = {
		.hw_ops = &gfx_v9_4_3_ras_ops,
		.ras_late_init = &gfx_v9_4_3_ras_late_init,
	},
	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};