/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define mmSMNAID_XCD0_MCA_SMU 0x36430400	/* SMN AID XCD0 */
#define mmSMNAID_XCD1_MCA_SMU 0x38430400	/* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400	/* SMN XCD XCD0 */

#define XCC_REG_RANGE_0_LOW  0x2000	/* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3400	/* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000	/* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000	/* XCC gfxdec1 upper bound */

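/*
 * XCC register offsets repeat per instance; masking to the low 16 bits
 * turns an absolute offset into one local to the current XCC
 * (e.g. 0x1A010 -> 0xA010).
 */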
#define NORMALIZE_XCC_REG_OFFSET(offset) \
	(offset & 0xFFFF)

static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
};

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					 uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask: 0, queue_type: 0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					enum amdgpu_unmap_queues_action action,
					u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					u64 addr,
					u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					  uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait until the dequeue request takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait for hqd deactivation\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
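	/* PM4 packet sizes in dwords (header plus payload), matching the
	 * amdgpu_ring_write() sequences emitted above.
	 */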
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		/* Golden settings applied by driver for ASIC with rev_id 0 */
		if (adev->rev_id == 0) {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
					      REDUCE_FIFO_DEPTH_BY_2, 2);
		} else {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
					      SPARE, 0x1);
		}
	}
}

static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC register, normalize the offset to keep only the
	 * lower 16 bits so that it is local to the XCC.
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
	    ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					 bool wc, uint32_t reg, uint32_t val)
{
	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				    int mem_space, int opt, uint32_t addr0,
				    uint32_t addr1, uint32_t ref, uint32_t mask,
				    uint32_t inv)
{
	/* Only do the normalization on regspace */
	if (mem_space == 0) {
		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |	 /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
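	/* Strobe the capture register to latch the free-running counter,
	 * then read the latched 64-bit value as an LSB/MSB pair.
	 */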
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					 const char *chip_name)
{
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
	return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
						const char *chip_name)
{
	int err;

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   "amdgpu/%s_mec.bin", chip_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

	gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
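	/* one GFX9_MEC_HPD_SIZE EOP buffer per compute ring on every XCC */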
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

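/*
 * Wave-state registers are reached through the SQ indirect interface:
 * program SQ_IND_INDEX with the wave/SIMD/register selection, then read
 * the value(s) back through SQ_IND_DATA (with auto-increment for the bulk
 * reads in wave_read_regs()).
 */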
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	u32 xcp_ctl;

	/* Value is expected to be the same on all, fetch from first instance */
	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}

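/*
 * With PSP present, the partition switch is requested through the PSP;
 * otherwise program CP_HYP_XCP_CTL on every XCC directly with the number
 * of XCCs per partition and that XCC's virtual id within its partition.
 */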
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
					       int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
					    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

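/*
 * Translate an IH node id to a logical XCC instance: ih_node / 2 selects
 * the physical XCC, and the number of enabled bits below it in xcc_mask
 * gives the logical index (hence the hweight - 1 below).
 */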
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
};

static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
				      struct aca_bank *bank, enum aca_smu_type type,
				      void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	u32 instlo;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* NOTE: overwrite info.die_id with xcd id for gfx */
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
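	/* drop bit 0 so instlo can be matched against the SMN base addresses */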
	instlo &= GENMASK(31, 1);
	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];

	switch (type) {
	case ACA_SMU_TYPE_UE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_UE, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG,
				      NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG,
				      NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG,
				      MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG,
				      NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG,
				      NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG,
				      PIPE_INTERLEAVE_SIZE));

	return 0;
}

static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
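	/* ring->doorbell_index is a dword index, while doorbell_index
	 * entries appear to be in 64-bit doorbell units, hence the shift.
	 */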
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
		ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
		  AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
	uint32_t *ptr, num_xcc, inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}
}

static int gfx_v9_4_3_sw_init(void *handle)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 153) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
								       ring_id,
								       xcc_id,
								       i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		r = amdgpu_gfx_sysfs_init(adev);
		if (r)
			return r;
	}

	gfx_v9_4_3_alloc_ip_dump(adev);

	r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
	if (r)
		return r;

	return 0;
}

static int gfx_v9_4_3_sw_fini(void *handle)
{
	int i, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	amdgpu_gfx_cleaner_shader_sw_fini(adev);

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gfx_sysfs_fini(adev);
	amdgpu_gfx_sysfs_isolation_shader_fini(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
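			/* the aperture base fields appear to take bits 63:48
			 * of the virtual address, hence the shift by 48
			 */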
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list is supported from v2_1 onward,
	 * and it is needed by the gfxoff feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

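/*
 * Request RLC safe mode by writing CMD together with a MESSAGE of 1, then
 * poll until the RLC firmware acknowledges by clearing the CMD field.
 */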
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_reset(adev, i);
}
1523
gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device * adev,int xcc_id)1524 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1525 {
1526 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1527 RLC_ENABLE_F32, 1);
1528 udelay(50);
1529
	/* on APUs (e.g. carrizo) the CP interrupt is enabled only after the CP is initialized */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
		udelay(50);
	}
}

static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
			 * default is 0x9C4 to create a 100us interval */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
			 * to disable the page fault retry interrupts, default is
			 * 0x100 (256) */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
		}
#endif
	}
}

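/* Legacy (driver-side) RLC ucode load: stream the ucode words through the
 * RLC_GPM_UCODE_ADDR/DATA pair, then write the fw version back to
 * RLC_GPM_UCODE_ADDR.
 */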
static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
					     int xcc_id)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
			RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++) {
		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
			msleep(1);
		}
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	}
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
	int r;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
		/* legacy rlc firmware loading */
		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
		if (r)
			return r;
		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	/* disable CG */
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
	int r, i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

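/* Update the VMID used for RLC SPM (streaming performance monitor)
 * accesses in RLC_SPM_MC_CNTL; only touch the register if the value
 * actually changes.
 */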
static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
				       unsigned vmid)
{
	u32 reg, pre_data, data;

	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
		pre_data = RREG32_NO_KIQ(reg);
	else
		pre_data = RREG32(reg);

	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (pre_data != data) {
		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
		else
			WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
	}
}

static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
};

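/* Return true if @offset matches one of the RLCG-protected register
 * entries, translating logical to device instances where applicable.
 */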
static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
					uint32_t offset,
					struct soc15_reg_rlcg *entries, int arr_size)
{
	int i, inst;
	uint32_t reg;

	if (!entries)
		return false;

	for (i = 0; i < arr_size; i++) {
		const struct soc15_reg_rlcg *entry;

		entry = &entries[i];
		inst = adev->ip_map.logical_to_dev_inst ?
			       adev->ip_map.logical_to_dev_inst(
				       adev, entry->hwip, entry->instance) :
			       entry->instance;
		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
		      entry->reg;
		if (offset == reg)
			return true;
	}

	return false;
}

static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
	return gfx_v9_4_3_check_rlcg_range(adev, offset,
					   (void *)rlcg_access_gc_9_4_3,
					   ARRAY_SIZE(rlcg_access_gc_9_4_3));
}

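/* Release or halt the MEC: clearing CP_MEC_CNTL lets the MEC run, while
 * disabling invalidates the instruction cache and resets/halts all
 * ME1/ME2 pipes.
 */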
static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
					     bool enable, int xcc_id)
{
	if (enable) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
			(CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
			 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
			 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
						    int xcc_id)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;
	u32 mec_ucode_addr_offset;
	u32 mec_ucode_data_offset;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	mec_ucode_addr_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
	mec_ucode_data_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);

	/* MEC1 */
	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32(mec_ucode_data_offset,
		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */

	return 0;
}

/* KIQ functions */
static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
}

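/* Raise pipe/queue priority in the MQD for high-priority compute queues */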
static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
			mqd->cp_hqd_queue_priority =
				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
		}
	}
}

static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	mqd->dynamic_cu_mask_addr_lo =
		lower_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
	mqd->dynamic_cu_mask_addr_hi =
		upper_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
		if (amdgpu_sriov_vf(adev))
			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					    DOORBELL_MODE, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a queue/ring */
	gfx_v9_4_3_mqd_set_priority(ring, mqd);
	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);

	/* the map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd->cp_hqd_active = 1;

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
					    int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
	       mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
	       mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
	       mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
		       mqd->cp_hqd_dequeue_request);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
		       mqd->cp_hqd_pq_rptr);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
		       mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
		       mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
	       mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
	       mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
	       mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
	       mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
	       mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
	       mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
		mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
	       mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(
			GC, GET_INST(GC, xcc_id),
			regCP_MEC_DOORBELL_RANGE_LOWER,
			((adev->doorbell_index.kiq +
			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
			 2) << 2);
		WREG32_SOC15(
			GC, GET_INST(GC, xcc_id),
			regCP_MEC_DOORBELL_RANGE_UPPER,
			((adev->doorbell_index.userqueue_end +
			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
			 2) << 2);
	}

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
	       mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
	       mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
	       mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
	       mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

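/* Dequeue the HQD (with a manual deactivate on timeout) and reset the
 * queue registers to their defaults.
 */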
static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
					  int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	int j;

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {

		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);

		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}

		if (j == adev->usec_timeout) {
			DRM_DEBUG("%s dequeue request failed.\n", ring->name);

			/* Manual disable if dequeue request times out */
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
		}

		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
		      0);
	}

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	struct v9_mqd *tmp_mqd;

	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);

	/* The GPU could be in a bad state during probe if the driver triggered
	 * a reset after loading the SMU; in that case the MQD was never
	 * initialized and needs to be re-initialized. Check
	 * mqd->cp_hqd_pq_control, since this value should not be 0.
	 */
	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
		/* for the GPU_RESET case, reset the MQD to a clean status */
		if (adev->gfx.kiq[xcc_id].mqd_backup)
			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.kiq[xcc_id].mqd_backup)
			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
	}

	return 0;
}

static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];
	struct v9_mqd *tmp_mqd;

	/* As with the KIQ init above, the driver needs to re-init the MQD if
	 * mqd->cp_hqd_pq_control indicates it was not initialized before.
	 */
	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];

	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
	} else {
		/* restore MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int j;

	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
		ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
			mutex_lock(&adev->srbm_mutex);
			soc15_grbm_select(adev, ring->me,
					ring->pipe,
					ring->queue, 0, GET_INST(GC, xcc_id));
			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
			mutex_unlock(&adev->srbm_mutex);
		}
	}

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq[xcc_id].ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	return 0;
}

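/* Enable the MEC and initialize the MQD of every KCQ on this XCC, then
 * map the queues through the KIQ.
 */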
static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
done:
	return r;
}

static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int r, j;

	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);

		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
		if (r)
			return r;
	} else {
		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
	}

	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
	if (r)
		return r;

	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
	if (r)
		return r;

	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
		ring = &adev->gfx.compute_ring
				[j + xcc_id * adev->gfx.num_compute_rings];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);

	return 0;
}

static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
{
	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	if (amdgpu_sriov_vf(adev)) {
		enum amdgpu_gfx_partition mode;

		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
						       AMDGPU_XCP_FL_NONE);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
			return -EINVAL;
		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
		num_xcp = num_xcc / num_xcc_per_xcp;
		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
	} else {
		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
						    AMDGPU_XCP_FL_NONE) ==
		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
			r = amdgpu_xcp_switch_partition_mode(
				adev->xcp_mgr, amdgpu_user_partt_mode);
	}
	if (r)
		return r;

	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
{
	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);

	if (amdgpu_sriov_vf(adev)) {
		/* wptr polling must be disabled for SRIOV once the hw is done;
		 * otherwise the CPC engine may keep fetching a WB address that
		 * is already invalid after the sw side has finished, which
		 * triggers a DMAR read error on the hypervisor side.
		 */
		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
		return;
	}

	/* Use the deinitialize sequence from CAIL when unbinding the device
	 * from the driver, otherwise the KIQ hangs when binding it back.
	 */
	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
				  adev->gfx.kiq[xcc_id].ring.pipe,
				  adev->gfx.kiq[xcc_id].ring.queue, 0,
				  GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
					       xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
	}

	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}

static int gfx_v9_4_3_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
				       adev->gfx.cleaner_shader_ptr);

	if (!amdgpu_sriov_vf(adev))
		gfx_v9_4_3_init_golden_registers(adev);

	gfx_v9_4_3_constants_init(adev);

	r = adev->gfx.rlc.funcs->resume(adev);
	if (r)
		return r;

	r = gfx_v9_4_3_cp_resume(adev);
	if (r)
		return r;

	return r;
}

static int gfx_v9_4_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, num_xcc;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}

static int gfx_v9_4_3_suspend(void *handle)
{
	return gfx_v9_4_3_hw_fini(handle);
}

static int gfx_v9_4_3_resume(void *handle)
{
	return gfx_v9_4_3_hw_init(handle);
}

static bool gfx_v9_4_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
					GRBM_STATUS, GUI_ACTIVE))
			return false;
	}
	return true;
}

static int gfx_v9_4_3_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v9_4_3_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gfx_v9_4_3_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (grbm_soft_reset) {
		/* stop the rlc */
		adev->gfx.rlc.funcs->stop(adev);

		/* Disable MEC parsing/prefetching */
		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);

		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
					    uint32_t vmid,
					    uint32_t gds_base, uint32_t gds_size,
					    uint32_t gws_base, uint32_t gws_size,
					    uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
				     gds_base);

	/* GDS Size */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
				     gds_size);

	/* GWS */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
				     gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
				     (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v9_4_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);
	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
	gfx_v9_4_3_set_ring_funcs(adev);
	gfx_v9_4_3_set_irq_funcs(adev);
	gfx_v9_4_3_set_gds_init(adev);
	gfx_v9_4_3_set_rlc_funcs(adev);

	/* init rlcg reg access ctrl */
	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);

	return gfx_v9_4_3_init_microcode(adev);
}

static int gfx_v9_4_3_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
	if (r)
		return r;

	if (adev->gfx.ras &&
	    adev->gfx.ras->enable_watchdog_timer)
		adev->gfx.ras->enable_watchdog_timer(adev);

	return 0;
}

static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
					    bool enable, int xcc_id)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
		return;

	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				  regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
		return;

	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				  regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void
gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t data, def;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);

		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);

		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
		}
	}
}

static void
gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t def, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {

		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* CGCG Hysteresis: 400us */
		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);

		data = (0x2710
			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x33450100) */
		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
		       (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
	}
}

static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
						  bool enable, int xcc_id)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);

	if (enable) {
		/* FGCG */
		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);

		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
								xcc_id);
		/* === CGCG + CGLS === */
		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
								xcc_id);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
								xcc_id);
		/* === MGCG + MGLS === */
		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
								xcc_id);

		/* FGCG */
		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
	.init = gfx_v9_4_3_rlc_init,
	.resume = gfx_v9_4_3_rlc_resume,
	.stop = gfx_v9_4_3_rlc_stop,
	.reset = gfx_v9_4_3_rlc_reset,
	.start = gfx_v9_4_3_rlc_start,
	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};

static int gfx_v9_4_3_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

static int gfx_v9_4_3_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		for (i = 0; i < num_xcc; i++)
			gfx_v9_4_3_xcc_update_gfx_clock_gating(
				adev, state == AMD_CG_STATE_GATE, i);
		break;
	default:
		break;
	}
	return 0;
}

static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}

static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
				adev->nbio.funcs->get_hdp_flush_req_offset(adev),
				adev->nbio.funcs->get_hdp_flush_done_offset(adev),
				ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
					    struct amdgpu_job *job,
					    struct amdgpu_ib *ib,
					    uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high possibility to get wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				       u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
					       EOP_TC_NC_ACTION_EN) :
					      (EOP_TCL1_ACTION_EN |
					       EOP_TC_ACTION_EN |
					       EOP_TC_WB_ACTION_EN |
					       EOP_TC_MD_ACTION_EN)) |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * the address should be Qword aligned for a 64-bit write, and Dword
	 * aligned if only the low 32 bits of data are sent (data high discarded)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

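/* Emit a WAIT_REG_MEM that blocks until the ring's fence memory reaches
 * the most recent synced sequence number.
 */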
static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
				lower_32_bits(addr), upper_32_bits(addr),
				seq, 0xffffffff, 4);
}

static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
					  unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					   u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

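/* Emit a COPY_DATA packet that copies a register value into the WB
 * buffer slot at @reg_val_offs, with write confirmation.
 */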
static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				      uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}

static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				      uint32_t val)
{
	uint32_t cmd = 0;

	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					  uint32_t val, uint32_t mask)
{
	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring * ring,uint32_t reg0,uint32_t reg1,uint32_t ref,uint32_t mask)3045 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3046 uint32_t reg0, uint32_t reg1,
3047 uint32_t ref, uint32_t mask)
3048 {
3049 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
3050 ref, mask);
3051 }
3052
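/*
 * Try to soft-recover a hung queue by issuing a SQ_CMD kill for all
 * waves of the given VMID. Gated behind the experimental resets debug
 * option.
 */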
3053 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3054 unsigned vmid)
3055 {
3056 struct amdgpu_device *adev = ring->adev;
3057 uint32_t value = 0;
3058
3059 if (!adev->debug_exp_resets)
3060 return;
3061
3062 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3063 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3064 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3065 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3066 amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3067 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3068 amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3069 }
3070
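/*
 * Enable or disable the EOP timestamp interrupt for one MEC pipe on
 * the given XCC instance.
 */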
3071 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3072 struct amdgpu_device *adev, int me, int pipe,
3073 enum amdgpu_interrupt_state state, int xcc_id)
3074 {
3075 u32 mec_int_cntl, mec_int_cntl_reg;
3076
3077 /*
3078 * amdgpu controls only the first MEC. That's why this function only
3079 * handles the setting of interrupts for this specific MEC. All other
3080 * pipes' interrupts are set by amdkfd.
3081 */
3082
3083 if (me == 1) {
3084 switch (pipe) {
3085 case 0:
3086 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3087 break;
3088 case 1:
3089 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3090 break;
3091 case 2:
3092 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3093 break;
3094 case 3:
3095 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3096 break;
3097 default:
3098 DRM_DEBUG("invalid pipe %d\n", pipe);
3099 return;
3100 }
3101 } else {
3102 DRM_DEBUG("invalid me %d\n", me);
3103 return;
3104 }
3105
3106 switch (state) {
3107 case AMDGPU_IRQ_STATE_DISABLE:
3108 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3109 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3110 TIME_STAMP_INT_ENABLE, 0);
3111 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3112 break;
3113 case AMDGPU_IRQ_STATE_ENABLE:
3114 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3115 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3116 TIME_STAMP_INT_ENABLE, 1);
3117 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3118 break;
3119 default:
3120 break;
3121 }
3122 }
3123
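/*
 * Return the CP_ME1_PIPEx_INT_CNTL register offset for the given pipe,
 * or 0 for me/pipe combinations not managed by amdgpu.
 */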
3124 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3125 int xcc_id, int me, int pipe)
3126 {
3127 /*
3128 * amdgpu controls only the first MEC. That's why this function only
3129 * handles the setting of interrupts for this specific MEC. All other
3130 * pipes' interrupts are set by amdkfd.
3131 */
3132 if (me != 1)
3133 return 0;
3134
3135 switch (pipe) {
3136 case 0:
3137 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3138 case 1:
3139 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3140 case 2:
3141 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3142 case 3:
3143 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3144 default:
3145 return 0;
3146 }
3147 }
3148
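/*
 * Toggle the privileged register fault interrupt on CP_INT_CNTL_RING0
 * and on every driver-owned MEC pipe, across all XCC instances.
 */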
3149 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3150 struct amdgpu_irq_src *source,
3151 unsigned type,
3152 enum amdgpu_interrupt_state state)
3153 {
3154 u32 mec_int_cntl_reg, mec_int_cntl;
3155 int i, j, k, num_xcc;
3156
3157 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3158 switch (state) {
3159 case AMDGPU_IRQ_STATE_DISABLE:
3160 case AMDGPU_IRQ_STATE_ENABLE:
3161 for (i = 0; i < num_xcc; i++) {
3162 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3163 PRIV_REG_INT_ENABLE,
3164 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3165 for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3166 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3167 /* MECs start at 1 */
3168 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3169
3170 if (mec_int_cntl_reg) {
3171 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3172 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3173 PRIV_REG_INT_ENABLE,
3174 state == AMDGPU_IRQ_STATE_ENABLE ?
3175 1 : 0);
3176 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3177 }
3178 }
3179 }
3180 }
3181 break;
3182 default:
3183 break;
3184 }
3185
3186 return 0;
3187 }
3188
3189 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3190 struct amdgpu_irq_src *source,
3191 unsigned type,
3192 enum amdgpu_interrupt_state state)
3193 {
3194 u32 mec_int_cntl_reg, mec_int_cntl;
3195 int i, j, k, num_xcc;
3196
3197 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3198 switch (state) {
3199 case AMDGPU_IRQ_STATE_DISABLE:
3200 case AMDGPU_IRQ_STATE_ENABLE:
3201 for (i = 0; i < num_xcc; i++) {
3202 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3203 OPCODE_ERROR_INT_ENABLE,
3204 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3205 for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3206 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3207 /* MECs start at 1 */
3208 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3209
3210 if (mec_int_cntl_reg) {
3211 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3212 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3213 OPCODE_ERROR_INT_ENABLE,
3214 state == AMDGPU_IRQ_STATE_ENABLE ?
3215 1 : 0);
3216 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3217 }
3218 }
3219 }
3220 }
3221 break;
3222 default:
3223 break;
3224 }
3225
3226 return 0;
3227 }
3228
3229 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3230 struct amdgpu_irq_src *source,
3231 unsigned type,
3232 enum amdgpu_interrupt_state state)
3233 {
3234 int i, num_xcc;
3235
3236 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3237 switch (state) {
3238 case AMDGPU_IRQ_STATE_DISABLE:
3239 case AMDGPU_IRQ_STATE_ENABLE:
3240 for (i = 0; i < num_xcc; i++)
3241 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3242 PRIV_INSTR_INT_ENABLE,
3243 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3244 break;
3245 default:
3246 break;
3247 }
3248
3249 return 0;
3250 }
3251
3252 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3253 struct amdgpu_irq_src *src,
3254 unsigned type,
3255 enum amdgpu_interrupt_state state)
3256 {
3257 int i, num_xcc;
3258
3259 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3260 for (i = 0; i < num_xcc; i++) {
3261 switch (type) {
3262 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3263 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3264 adev, 1, 0, state, i);
3265 break;
3266 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3267 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3268 adev, 1, 1, state, i);
3269 break;
3270 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3271 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3272 adev, 1, 2, state, i);
3273 break;
3274 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3275 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3276 adev, 1, 3, state, i);
3277 break;
3278 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3279 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3280 adev, 2, 0, state, i);
3281 break;
3282 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3283 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3284 adev, 2, 1, state, i);
3285 break;
3286 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3287 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3288 adev, 2, 2, state, i);
3289 break;
3290 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3291 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3292 adev, 2, 3, state, i);
3293 break;
3294 default:
3295 break;
3296 }
3297 }
3298
3299 return 0;
3300 }
3301
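/*
 * EOP interrupt handler: decode me/pipe/queue from the IV ring_id, map
 * the IH node to an XCC instance and process fences on the matching
 * compute ring.
 */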
3302 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
3303 struct amdgpu_irq_src *source,
3304 struct amdgpu_iv_entry *entry)
3305 {
3306 int i, xcc_id;
3307 u8 me_id, pipe_id, queue_id;
3308 struct amdgpu_ring *ring;
3309
3310 DRM_DEBUG("IH: CP EOP\n");
3311 me_id = (entry->ring_id & 0x0c) >> 2;
3312 pipe_id = (entry->ring_id & 0x03) >> 0;
3313 queue_id = (entry->ring_id & 0x70) >> 4;
3314
3315 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3316
3317 if (xcc_id == -EINVAL)
3318 return -EINVAL;
3319
3320 switch (me_id) {
3321 case 0:
3322 case 1:
3323 case 2:
3324 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3325 ring = &adev->gfx.compute_ring
3326 [i +
3327 xcc_id * adev->gfx.num_compute_rings];
3328 /* Per-queue interrupt is supported for MEC starting from VI.
3329 * The interrupt can only be enabled/disabled per pipe instead of per queue.
3330 */
3331
3332 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3333 amdgpu_fence_process(ring);
3334 }
3335 break;
3336 }
3337 return 0;
3338 }
3339
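/*
 * Common fault handler: report a scheduler fault on the compute ring
 * matching the me/pipe/queue decoded from the IV entry.
 */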
3340 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3341 struct amdgpu_iv_entry *entry)
3342 {
3343 u8 me_id, pipe_id, queue_id;
3344 struct amdgpu_ring *ring;
3345 int i, xcc_id;
3346
3347 me_id = (entry->ring_id & 0x0c) >> 2;
3348 pipe_id = (entry->ring_id & 0x03) >> 0;
3349 queue_id = (entry->ring_id & 0x70) >> 4;
3350
3351 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3352
3353 if (xcc_id == -EINVAL)
3354 return;
3355
3356 switch (me_id) {
3357 case 0:
3358 case 1:
3359 case 2:
3360 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3361 ring = &adev->gfx.compute_ring
3362 [i +
3363 xcc_id * adev->gfx.num_compute_rings];
3364 if (ring->me == me_id && ring->pipe == pipe_id &&
3365 ring->queue == queue_id)
3366 drm_sched_fault(&ring->sched);
3367 }
3368 break;
3369 }
3370 }
3371
3372 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3373 struct amdgpu_irq_src *source,
3374 struct amdgpu_iv_entry *entry)
3375 {
3376 DRM_ERROR("Illegal register access in command stream\n");
3377 gfx_v9_4_3_fault(adev, entry);
3378 return 0;
3379 }
3380
3381 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
3382 struct amdgpu_irq_src *source,
3383 struct amdgpu_iv_entry *entry)
3384 {
3385 DRM_ERROR("Illegal opcode in command stream\n");
3386 gfx_v9_4_3_fault(adev, entry);
3387 return 0;
3388 }
3389
3390 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3391 struct amdgpu_irq_src *source,
3392 struct amdgpu_iv_entry *entry)
3393 {
3394 DRM_ERROR("Illegal instruction in command stream\n");
3395 gfx_v9_4_3_fault(adev, entry);
3396 return 0;
3397 }
3398
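/*
 * Emit an ACQUIRE_MEM that invalidates the shader instruction and
 * scalar caches plus TCL1/TC (with TC writeback) over the full
 * address range.
 */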
3399 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3400 {
3401 const unsigned int cp_coher_cntl =
3402 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3403 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3404 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3405 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3406 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3407
3408 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3409 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3410 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3411 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
3412 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
3413 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3414 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
3415 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3416 }
3417
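/*
 * Limit (or restore) the compute wave occupancy of a single CS pipe
 * via its SPI_WCL_PIPE_PERCENT_CSn register.
 */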
3418 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3419 uint32_t pipe, bool enable)
3420 {
3421 struct amdgpu_device *adev = ring->adev;
3422 uint32_t val;
3423 uint32_t wcl_cs_reg;
3424
3425 /* the regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */
3426 val = enable ? 0x1 : 0x7f;
3427
3428 switch (pipe) {
3429 case 0:
3430 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3431 break;
3432 case 1:
3433 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3434 break;
3435 case 2:
3436 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3437 break;
3438 case 3:
3439 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3440 break;
3441 default:
3442 DRM_DEBUG("invalid pipe %d\n", pipe);
3443 return;
3444 }
3445
3446 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3447
3448 }
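
/*
 * Throttle gfx waves and the other CS pipes while a high priority
 * compute queue is active, so that queue gets the bulk of the GPU.
 */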
3449 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3450 {
3451 struct amdgpu_device *adev = ring->adev;
3452 uint32_t val;
3453 int i;
3454
3455 /* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
3456 * the number of gfx waves. Setting only the lower 5 bits makes sure gfx
3457 * gets only around 25% of the gpu resources.
3458 */
3459 val = enable ? 0x1f : 0x07ffffff;
3460 amdgpu_ring_emit_wreg(ring,
3461 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3462 val);
3463
3464 /* Restrict waves for normal/low priority compute queues as well
3465 * to get the best QoS for high priority compute jobs.
3466 *
3467 * amdgpu controls only the 1st ME (CS pipes 0-3).
3468 */
3469 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3470 if (i != ring->pipe)
3471 gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3472
3473 }
3474 }
3475
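/*
 * Poll CP_HQD_ACTIVE under SRBM selection until the HQD dequeue
 * completes, returning -ETIMEDOUT after adev->usec_timeout microseconds.
 */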
3476 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
3477 uint32_t pipe, uint32_t queue,
3478 uint32_t xcc_id)
3479 {
3480 int i, r;
3481 /* make sure dequeue is complete */
3482 gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
3483 mutex_lock(&adev->srbm_mutex);
3484 soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
3485 for (i = 0; i < adev->usec_timeout; i++) {
3486 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
3487 break;
3488 udelay(1);
3489 }
3490 if (i >= adev->usec_timeout)
3491 r = -ETIMEDOUT;
3492 else
3493 r = 0;
3494 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
3495 mutex_unlock(&adev->srbm_mutex);
3496 gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
3497
3498 return r;
3499
3500 }
3501
3502 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
3503 {
3504 /* TODO: Check whether the gfx9.4.4 MEC FW supports pipe reset as well. */
3505 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
3506 adev->gfx.mec_fw_version >= 0x0000009b)
3507 return true;
3508 else
3509 dev_warn_once(adev->dev, "Please use the latest MEC version to check whether pipe reset is supported\n");
3510
3511 return false;
3512 }
3513
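/*
 * Reset the MEC pipe backing this ring by pulsing its
 * MEC_MEx_PIPEy_RESET bit in CP_MEC_CNTL, then wait for the HQD to
 * deactivate.
 */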
3514 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
3515 {
3516 struct amdgpu_device *adev = ring->adev;
3517 uint32_t reset_pipe, clean_pipe;
3518 int r;
3519
3520 if (!gfx_v9_4_3_pipe_reset_support(adev))
3521 return -EINVAL;
3522
3523 gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
3524 mutex_lock(&adev->srbm_mutex);
3525
3526 reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
3527 clean_pipe = reset_pipe;
3528
3529 if (ring->me == 1) {
3530 switch (ring->pipe) {
3531 case 0:
3532 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3533 MEC_ME1_PIPE0_RESET, 1);
3534 break;
3535 case 1:
3536 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3537 MEC_ME1_PIPE1_RESET, 1);
3538 break;
3539 case 2:
3540 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3541 MEC_ME1_PIPE2_RESET, 1);
3542 break;
3543 case 3:
3544 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3545 MEC_ME1_PIPE3_RESET, 1);
3546 break;
3547 default:
3548 break;
3549 }
3550 } else {
3551 if (ring->pipe)
3552 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3553 MEC_ME2_PIPE1_RESET, 1);
3554 else
3555 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3556 MEC_ME2_PIPE0_RESET, 1);
3557 }
3558
3559 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
3560 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
3561 mutex_unlock(&adev->srbm_mutex);
3562 gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
3563
3564 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3565 return r;
3566 }
3567
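/*
 * Reset a compute queue: unmap it through the KIQ, fall back to a pipe
 * reset if the HQD does not deactivate, then re-initialize the MQD and
 * remap the queue.
 */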
3568 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3569 unsigned int vmid)
3570 {
3571 struct amdgpu_device *adev = ring->adev;
3572 struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3573 struct amdgpu_ring *kiq_ring = &kiq->ring;
3574 unsigned long flags;
3575 int r;
3576
3577 if (!adev->debug_exp_resets)
3578 return -EINVAL;
3579
3580 if (amdgpu_sriov_vf(adev))
3581 return -EINVAL;
3582
3583 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3584 return -EINVAL;
3585
3586 spin_lock_irqsave(&kiq->ring_lock, flags);
3587
3588 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3589 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3590 return -ENOMEM;
3591 }
3592
3593 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3594 0, 0);
3595 amdgpu_ring_commit(kiq_ring);
3596
3597 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3598
3599 r = amdgpu_ring_test_ring(kiq_ring);
3600 if (r) {
3601 dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
3602 ring->name);
3603 goto pipe_reset;
3604 }
3605
3606 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3607 if (r)
3608 dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
3609
3610 pipe_reset:
3611 if (r) {
3612 r = gfx_v9_4_3_reset_hw_pipe(ring);
3613 dev_info(adev->dev, "ring %s pipe reset %s\n", ring->name,
3614 r ? "failed" : "succeeded");
3615 if (r)
3616 return r;
3617 }
3618
3619 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3620 if (unlikely(r != 0)) {
3621 dev_err(adev->dev, "failed to reserve mqd_obj\n");
3622 return r;
3623 }
3624 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3625 if (!r) {
3626 r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
3627 amdgpu_bo_kunmap(ring->mqd_obj);
3628 ring->mqd_ptr = NULL;
3629 }
3630 amdgpu_bo_unreserve(ring->mqd_obj);
3631 if (r) {
3632 dev_err(adev->dev, "fail to unresv mqd_obj\n");
3633 return r;
3634 }
3635 spin_lock_irqsave(&kiq->ring_lock, flags);
3636 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
3637 if (r) {
3638 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3639 return -ENOMEM;
3640 }
3641 kiq->pmf->kiq_map_queues(kiq_ring, ring);
3642 amdgpu_ring_commit(kiq_ring);
3643 spin_unlock_irqrestore(&kiq->ring_lock, flags);
3644
3645 r = amdgpu_ring_test_ring(kiq_ring);
3646 if (r) {
3647 dev_err(adev->dev, "fail to remap queue\n");
3648 return r;
3649 }
3650 return amdgpu_ring_test_ring(ring);
3651 }
3652
3653 enum amdgpu_gfx_cp_ras_mem_id {
3654 AMDGPU_GFX_CP_MEM1 = 1,
3655 AMDGPU_GFX_CP_MEM2,
3656 AMDGPU_GFX_CP_MEM3,
3657 AMDGPU_GFX_CP_MEM4,
3658 AMDGPU_GFX_CP_MEM5,
3659 };
3660
3661 enum amdgpu_gfx_gcea_ras_mem_id {
3662 AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3663 AMDGPU_GFX_GCEA_IORD_CMDMEM,
3664 AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3665 AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3666 AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3667 AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3668 AMDGPU_GFX_GCEA_MAM_DMEM0,
3669 AMDGPU_GFX_GCEA_MAM_DMEM1,
3670 AMDGPU_GFX_GCEA_MAM_DMEM2,
3671 AMDGPU_GFX_GCEA_MAM_DMEM3,
3672 AMDGPU_GFX_GCEA_MAM_AMEM0,
3673 AMDGPU_GFX_GCEA_MAM_AMEM1,
3674 AMDGPU_GFX_GCEA_MAM_AMEM2,
3675 AMDGPU_GFX_GCEA_MAM_AMEM3,
3676 AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3677 AMDGPU_GFX_GCEA_WRET_TAGMEM,
3678 AMDGPU_GFX_GCEA_RRET_TAGMEM,
3679 AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3680 AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3681 AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3682 };
3683
3684 enum amdgpu_gfx_gc_cane_ras_mem_id {
3685 AMDGPU_GFX_GC_CANE_MEM0 = 0,
3686 };
3687
3688 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3689 AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3690 };
3691
3692 enum amdgpu_gfx_gds_ras_mem_id {
3693 AMDGPU_GFX_GDS_MEM0 = 0,
3694 };
3695
3696 enum amdgpu_gfx_lds_ras_mem_id {
3697 AMDGPU_GFX_LDS_BANK0 = 0,
3698 AMDGPU_GFX_LDS_BANK1,
3699 AMDGPU_GFX_LDS_BANK2,
3700 AMDGPU_GFX_LDS_BANK3,
3701 AMDGPU_GFX_LDS_BANK4,
3702 AMDGPU_GFX_LDS_BANK5,
3703 AMDGPU_GFX_LDS_BANK6,
3704 AMDGPU_GFX_LDS_BANK7,
3705 AMDGPU_GFX_LDS_BANK8,
3706 AMDGPU_GFX_LDS_BANK9,
3707 AMDGPU_GFX_LDS_BANK10,
3708 AMDGPU_GFX_LDS_BANK11,
3709 AMDGPU_GFX_LDS_BANK12,
3710 AMDGPU_GFX_LDS_BANK13,
3711 AMDGPU_GFX_LDS_BANK14,
3712 AMDGPU_GFX_LDS_BANK15,
3713 AMDGPU_GFX_LDS_BANK16,
3714 AMDGPU_GFX_LDS_BANK17,
3715 AMDGPU_GFX_LDS_BANK18,
3716 AMDGPU_GFX_LDS_BANK19,
3717 AMDGPU_GFX_LDS_BANK20,
3718 AMDGPU_GFX_LDS_BANK21,
3719 AMDGPU_GFX_LDS_BANK22,
3720 AMDGPU_GFX_LDS_BANK23,
3721 AMDGPU_GFX_LDS_BANK24,
3722 AMDGPU_GFX_LDS_BANK25,
3723 AMDGPU_GFX_LDS_BANK26,
3724 AMDGPU_GFX_LDS_BANK27,
3725 AMDGPU_GFX_LDS_BANK28,
3726 AMDGPU_GFX_LDS_BANK29,
3727 AMDGPU_GFX_LDS_BANK30,
3728 AMDGPU_GFX_LDS_BANK31,
3729 AMDGPU_GFX_LDS_SP_BUFFER_A,
3730 AMDGPU_GFX_LDS_SP_BUFFER_B,
3731 };
3732
3733 enum amdgpu_gfx_rlc_ras_mem_id {
3734 AMDGPU_GFX_RLC_GPMF32 = 1,
3735 AMDGPU_GFX_RLC_RLCVF32,
3736 AMDGPU_GFX_RLC_SCRATCH,
3737 AMDGPU_GFX_RLC_SRM_ARAM,
3738 AMDGPU_GFX_RLC_SRM_DRAM,
3739 AMDGPU_GFX_RLC_TCTAG,
3740 AMDGPU_GFX_RLC_SPM_SE,
3741 AMDGPU_GFX_RLC_SPM_GRBMT,
3742 };
3743
3744 enum amdgpu_gfx_sp_ras_mem_id {
3745 AMDGPU_GFX_SP_SIMDID0 = 0,
3746 };
3747
3748 enum amdgpu_gfx_spi_ras_mem_id {
3749 AMDGPU_GFX_SPI_MEM0 = 0,
3750 AMDGPU_GFX_SPI_MEM1,
3751 AMDGPU_GFX_SPI_MEM2,
3752 AMDGPU_GFX_SPI_MEM3,
3753 };
3754
3755 enum amdgpu_gfx_sqc_ras_mem_id {
3756 AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3757 AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3758 AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3759 AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3760 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3761 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3762 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3763 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3764 AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3765 AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3766 AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3767 AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3768 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3769 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3770 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3771 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3772 AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3773 AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3774 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3775 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3776 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3777 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3778 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3779 };
3780
3781 enum amdgpu_gfx_sq_ras_mem_id {
3782 AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3783 AMDGPU_GFX_SQ_SGPR_MEM1,
3784 AMDGPU_GFX_SQ_SGPR_MEM2,
3785 AMDGPU_GFX_SQ_SGPR_MEM3,
3786 };
3787
3788 enum amdgpu_gfx_ta_ras_mem_id {
3789 AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3790 AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3791 AMDGPU_GFX_TA_FS_CFIFO_RAM,
3792 AMDGPU_GFX_TA_FSX_LFIFO,
3793 AMDGPU_GFX_TA_FS_DFIFO_RAM,
3794 };
3795
3796 enum amdgpu_gfx_tcc_ras_mem_id {
3797 AMDGPU_GFX_TCC_MEM1 = 1,
3798 };
3799
3800 enum amdgpu_gfx_tca_ras_mem_id {
3801 AMDGPU_GFX_TCA_MEM1 = 1,
3802 };
3803
3804 enum amdgpu_gfx_tci_ras_mem_id {
3805 AMDGPU_GFX_TCIW_MEM = 1,
3806 };
3807
3808 enum amdgpu_gfx_tcp_ras_mem_id {
3809 AMDGPU_GFX_TCP_LFIFO0 = 1,
3810 AMDGPU_GFX_TCP_SET0BANK0_RAM,
3811 AMDGPU_GFX_TCP_SET0BANK1_RAM,
3812 AMDGPU_GFX_TCP_SET0BANK2_RAM,
3813 AMDGPU_GFX_TCP_SET0BANK3_RAM,
3814 AMDGPU_GFX_TCP_SET1BANK0_RAM,
3815 AMDGPU_GFX_TCP_SET1BANK1_RAM,
3816 AMDGPU_GFX_TCP_SET1BANK2_RAM,
3817 AMDGPU_GFX_TCP_SET1BANK3_RAM,
3818 AMDGPU_GFX_TCP_SET2BANK0_RAM,
3819 AMDGPU_GFX_TCP_SET2BANK1_RAM,
3820 AMDGPU_GFX_TCP_SET2BANK2_RAM,
3821 AMDGPU_GFX_TCP_SET2BANK3_RAM,
3822 AMDGPU_GFX_TCP_SET3BANK0_RAM,
3823 AMDGPU_GFX_TCP_SET3BANK1_RAM,
3824 AMDGPU_GFX_TCP_SET3BANK2_RAM,
3825 AMDGPU_GFX_TCP_SET3BANK3_RAM,
3826 AMDGPU_GFX_TCP_VM_FIFO,
3827 AMDGPU_GFX_TCP_DB_TAGRAM0,
3828 AMDGPU_GFX_TCP_DB_TAGRAM1,
3829 AMDGPU_GFX_TCP_DB_TAGRAM2,
3830 AMDGPU_GFX_TCP_DB_TAGRAM3,
3831 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3832 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3833 AMDGPU_GFX_TCP_CMD_FIFO,
3834 };
3835
3836 enum amdgpu_gfx_td_ras_mem_id {
3837 AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3838 AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3839 AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3840 };
3841
3842 enum amdgpu_gfx_tcx_ras_mem_id {
3843 AMDGPU_GFX_TCX_FIFOD0 = 0,
3844 AMDGPU_GFX_TCX_FIFOD1,
3845 AMDGPU_GFX_TCX_FIFOD2,
3846 AMDGPU_GFX_TCX_FIFOD3,
3847 AMDGPU_GFX_TCX_FIFOD4,
3848 AMDGPU_GFX_TCX_FIFOD5,
3849 AMDGPU_GFX_TCX_FIFOD6,
3850 AMDGPU_GFX_TCX_FIFOD7,
3851 AMDGPU_GFX_TCX_FIFOB0,
3852 AMDGPU_GFX_TCX_FIFOB1,
3853 AMDGPU_GFX_TCX_FIFOB2,
3854 AMDGPU_GFX_TCX_FIFOB3,
3855 AMDGPU_GFX_TCX_FIFOB4,
3856 AMDGPU_GFX_TCX_FIFOB5,
3857 AMDGPU_GFX_TCX_FIFOB6,
3858 AMDGPU_GFX_TCX_FIFOB7,
3859 AMDGPU_GFX_TCX_FIFOA0,
3860 AMDGPU_GFX_TCX_FIFOA1,
3861 AMDGPU_GFX_TCX_FIFOA2,
3862 AMDGPU_GFX_TCX_FIFOA3,
3863 AMDGPU_GFX_TCX_FIFOA4,
3864 AMDGPU_GFX_TCX_FIFOA5,
3865 AMDGPU_GFX_TCX_FIFOA6,
3866 AMDGPU_GFX_TCX_FIFOA7,
3867 AMDGPU_GFX_TCX_CFIFO0,
3868 AMDGPU_GFX_TCX_CFIFO1,
3869 AMDGPU_GFX_TCX_CFIFO2,
3870 AMDGPU_GFX_TCX_CFIFO3,
3871 AMDGPU_GFX_TCX_CFIFO4,
3872 AMDGPU_GFX_TCX_CFIFO5,
3873 AMDGPU_GFX_TCX_CFIFO6,
3874 AMDGPU_GFX_TCX_CFIFO7,
3875 AMDGPU_GFX_TCX_FIFO_ACKB0,
3876 AMDGPU_GFX_TCX_FIFO_ACKB1,
3877 AMDGPU_GFX_TCX_FIFO_ACKB2,
3878 AMDGPU_GFX_TCX_FIFO_ACKB3,
3879 AMDGPU_GFX_TCX_FIFO_ACKB4,
3880 AMDGPU_GFX_TCX_FIFO_ACKB5,
3881 AMDGPU_GFX_TCX_FIFO_ACKB6,
3882 AMDGPU_GFX_TCX_FIFO_ACKB7,
3883 AMDGPU_GFX_TCX_FIFO_ACKD0,
3884 AMDGPU_GFX_TCX_FIFO_ACKD1,
3885 AMDGPU_GFX_TCX_FIFO_ACKD2,
3886 AMDGPU_GFX_TCX_FIFO_ACKD3,
3887 AMDGPU_GFX_TCX_FIFO_ACKD4,
3888 AMDGPU_GFX_TCX_FIFO_ACKD5,
3889 AMDGPU_GFX_TCX_FIFO_ACKD6,
3890 AMDGPU_GFX_TCX_FIFO_ACKD7,
3891 AMDGPU_GFX_TCX_DST_FIFOA0,
3892 AMDGPU_GFX_TCX_DST_FIFOA1,
3893 AMDGPU_GFX_TCX_DST_FIFOA2,
3894 AMDGPU_GFX_TCX_DST_FIFOA3,
3895 AMDGPU_GFX_TCX_DST_FIFOA4,
3896 AMDGPU_GFX_TCX_DST_FIFOA5,
3897 AMDGPU_GFX_TCX_DST_FIFOA6,
3898 AMDGPU_GFX_TCX_DST_FIFOA7,
3899 AMDGPU_GFX_TCX_DST_FIFOB0,
3900 AMDGPU_GFX_TCX_DST_FIFOB1,
3901 AMDGPU_GFX_TCX_DST_FIFOB2,
3902 AMDGPU_GFX_TCX_DST_FIFOB3,
3903 AMDGPU_GFX_TCX_DST_FIFOB4,
3904 AMDGPU_GFX_TCX_DST_FIFOB5,
3905 AMDGPU_GFX_TCX_DST_FIFOB6,
3906 AMDGPU_GFX_TCX_DST_FIFOB7,
3907 AMDGPU_GFX_TCX_DST_FIFOD0,
3908 AMDGPU_GFX_TCX_DST_FIFOD1,
3909 AMDGPU_GFX_TCX_DST_FIFOD2,
3910 AMDGPU_GFX_TCX_DST_FIFOD3,
3911 AMDGPU_GFX_TCX_DST_FIFOD4,
3912 AMDGPU_GFX_TCX_DST_FIFOD5,
3913 AMDGPU_GFX_TCX_DST_FIFOD6,
3914 AMDGPU_GFX_TCX_DST_FIFOD7,
3915 AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3916 AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3917 AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3918 AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3919 AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3920 AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3921 AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3922 AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3923 AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3924 AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3925 AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3926 AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3927 AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3928 AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3929 AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3930 AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3931 };
3932
3933 enum amdgpu_gfx_atc_l2_ras_mem_id {
3934 AMDGPU_GFX_ATC_L2_MEM0 = 0,
3935 };
3936
3937 enum amdgpu_gfx_utcl2_ras_mem_id {
3938 AMDGPU_GFX_UTCL2_MEM0 = 0,
3939 };
3940
3941 enum amdgpu_gfx_vml2_ras_mem_id {
3942 AMDGPU_GFX_VML2_MEM0 = 0,
3943 };
3944
3945 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3946 AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3947 };
3948
3949 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3950 {AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3951 {AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3952 {AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3953 {AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3954 {AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3955 };
3956
3957 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3958 {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3959 {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3960 {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3961 {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3962 {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3963 {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3964 {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3965 {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3966 {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3967 {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3968 {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3969 {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3970 {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3971 {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3972 {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3973 {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3974 {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3975 {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3976 {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3977 {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3978 };
3979
3980 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3981 {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3982 };
3983
3984 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3985 {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3986 };
3987
3988 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3989 {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3990 };
3991
3992 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3993 {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3994 {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3995 {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3996 {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3997 {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3998 {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3999 {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
4000 {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
4001 {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
4002 {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
4003 {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
4004 {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
4005 {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
4006 {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
4007 {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
4008 {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
4009 {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
4010 {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
4011 {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
4012 {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
4013 {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
4014 {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
4015 {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
4016 {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
4017 {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
4018 {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
4019 {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
4020 {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
4021 {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
4022 {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
4023 {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
4024 {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
4025 {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
4026 {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
4027 };
4028
4029 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
4030 {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
4031 {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
4032 {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
4033 {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
4034 {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
4035 {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
4036 {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
4037 {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
4038 };
4039
4040 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
4041 {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
4042 };
4043
4044 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
4045 {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
4046 {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
4047 {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
4048 {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
4049 };
4050
4051 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
4052 {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
4053 {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
4054 {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
4055 {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
4056 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
4057 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
4058 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
4059 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
4060 {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
4061 {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
4062 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
4063 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
4064 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
4065 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
4066 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
4067 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
4068 {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
4069 {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
4070 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
4071 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
4072 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
4073 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
4074 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
4075 };
4076
4077 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
4078 {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
4079 {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
4080 {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
4081 {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
4082 };
4083
4084 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
4085 {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
4086 {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
4087 {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
4088 {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
4089 {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
4090 };
4091
4092 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
4093 {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
4094 };
4095
4096 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
4097 {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
4098 };
4099
4100 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
4101 {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
4102 };
4103
4104 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
4105 {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
4106 {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
4107 {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
4108 {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
4109 {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
4110 {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
4111 {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
4112 {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
4113 {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
4114 {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
4115 {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
4116 {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
4117 {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
4118 {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
4119 {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
4120 {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
4121 {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
4122 {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
4123 {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
4124 {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
4125 {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
4126 {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
4127 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
4128 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
4129 {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
4130 };
4131
4132 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
4133 {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
4134 {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
4135 {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
4136 };
4137
4138 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
4139 {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
4140 {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
4141 {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
4142 {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
4143 {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
4144 {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
4145 {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
4146 {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
4147 {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
4148 {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
4149 {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
4150 {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
4151 {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
4152 {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
4153 {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
4154 {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
4155 {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
4156 {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
4157 {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
4158 {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
4159 {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
4160 {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
4161 {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
4162 {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
4163 {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
4164 {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
4165 {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
4166 {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
4167 {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
4168 {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
4169 {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
4170 {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
4171 {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
4172 {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
4173 {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
4174 {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
4175 {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
4176 {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
4177 {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
4178 {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
4179 {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
4180 {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
4181 {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
4182 {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
4183 {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
4184 {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
4185 {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
4186 {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
4187 {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
4188 {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4189 {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4190 {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4191 {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4192 {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4193 {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4194 {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4195 {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4196 {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4197 {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4198 {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4199 {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4200 {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4201 {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4202 {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4203 {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4204 {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4205 {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4206 {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4207 {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4208 {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4209 {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4210 {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4211 {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4212 {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4213 {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4214 {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4215 {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4216 {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4217 {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4218 {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4219 {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4220 {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4221 {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4222 {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4223 {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4224 {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4225 {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4226 {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4227 };
4228
4229 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4230 {AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
4231 };
4232
4233 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4234 {AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
4235 };
4236
4237 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4238 {AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
4239 };
4240
4241 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4242 {AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
4243 };
4244
4245 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4246 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4247 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4248 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4249 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4250 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4251 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4252 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4253 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4254 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4255 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4256 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4257 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4258 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
4259 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
4260 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
4261 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
4262 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
4263 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
4264 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
4265 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
4266 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
4267 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
4268 };
4269
4270 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
4271 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
4272 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4273 AMDGPU_GFX_RLC_MEM, 1},
4274 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
4275 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4276 AMDGPU_GFX_CP_MEM, 1},
4277 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
4278 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4279 AMDGPU_GFX_CP_MEM, 1},
4280 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
4281 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4282 AMDGPU_GFX_CP_MEM, 1},
4283 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
4284 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4285 AMDGPU_GFX_GDS_MEM, 1},
4286 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
4287 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4288 AMDGPU_GFX_GC_CANE_MEM, 1},
4289 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
4290 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4291 AMDGPU_GFX_SPI_MEM, 1},
4292 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
4293 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4294 AMDGPU_GFX_SP_MEM, 4},
4295 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
4296 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4297 AMDGPU_GFX_SP_MEM, 4},
4298 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
4299 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4300 AMDGPU_GFX_SQ_MEM, 4},
4301 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
4302 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4303 AMDGPU_GFX_SQC_MEM, 4},
4304 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
4305 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4306 AMDGPU_GFX_TCX_MEM, 1},
4307 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
4308 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4309 AMDGPU_GFX_TCC_MEM, 1},
4310 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
4311 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4312 AMDGPU_GFX_TA_MEM, 4},
4313 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
4314 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4315 AMDGPU_GFX_TCI_MEM, 1},
4316 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
4317 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4318 AMDGPU_GFX_TCP_MEM, 4},
4319 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
4320 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4321 AMDGPU_GFX_TD_MEM, 4},
4322 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
4323 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4324 AMDGPU_GFX_GCEA_MEM, 1},
4325 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
4326 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4327 AMDGPU_GFX_LDS_MEM, 4},
4328 };
4329
4330 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
4331 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
4332 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4333 AMDGPU_GFX_RLC_MEM, 1},
4334 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
4335 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4336 AMDGPU_GFX_CP_MEM, 1},
4337 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
4338 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4339 AMDGPU_GFX_CP_MEM, 1},
4340 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
4341 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4342 AMDGPU_GFX_CP_MEM, 1},
4343 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
4344 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4345 AMDGPU_GFX_GDS_MEM, 1},
4346 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
4347 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4348 AMDGPU_GFX_GC_CANE_MEM, 1},
4349 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
4350 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4351 AMDGPU_GFX_SPI_MEM, 1},
4352 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
4353 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4354 AMDGPU_GFX_SP_MEM, 4},
4355 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
4356 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4357 AMDGPU_GFX_SP_MEM, 4},
4358 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
4359 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4360 AMDGPU_GFX_SQ_MEM, 4},
4361 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
4362 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4363 AMDGPU_GFX_SQC_MEM, 4},
4364 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
4365 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4366 AMDGPU_GFX_TCX_MEM, 1},
4367 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
4368 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4369 AMDGPU_GFX_TCC_MEM, 1},
4370 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
4371 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4372 AMDGPU_GFX_TA_MEM, 4},
4373 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
4374 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4375 AMDGPU_GFX_TCI_MEM, 1},
4376 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
4377 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4378 AMDGPU_GFX_TCP_MEM, 4},
4379 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
4380 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4381 AMDGPU_GFX_TD_MEM, 4},
4382 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
4383 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
4384 AMDGPU_GFX_TCA_MEM, 1},
4385 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
4386 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4387 AMDGPU_GFX_GCEA_MEM, 1},
4388 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
4389 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4390 AMDGPU_GFX_LDS_MEM, 4},
4391 };
4392
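/*
 * Walk the CE and UE register lists for one XCC instance, selecting
 * each SE/instance as needed, and accumulate the error counts for the
 * die this XCC belongs to.
 */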
4393 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
4394 void *ras_error_status, int xcc_id)
4395 {
4396 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
4397 unsigned long ce_count = 0, ue_count = 0;
4398 uint32_t i, j, k;
4399
4400 /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
4401 struct amdgpu_smuio_mcm_config_info mcm_info = {
4402 .socket_id = adev->smuio.funcs->get_socket_id(adev),
4403 .die_id = xcc_id & 0x01 ? 1 : 0,
4404 };
4405
4406 mutex_lock(&adev->grbm_idx_mutex);
4407
4408 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4409 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4410 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4411 /* no need to select if instance number is 1 */
4412 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4413 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4414 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4415
4416 amdgpu_ras_inst_query_ras_error_count(adev,
4417 &(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4418 1,
4419 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
4420 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
4421 GET_INST(GC, xcc_id),
4422 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
4423 &ce_count);
4424
4425 amdgpu_ras_inst_query_ras_error_count(adev,
4426 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4427 1,
4428 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4429 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4430 GET_INST(GC, xcc_id),
4431 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4432 &ue_count);
4433 }
4434 }
4435 }
4436
4437 /* handle extra register entries of UE */
4438 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4439 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4440 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4441 /* no need to select if instance number is 1 */
4442 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4443 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4444 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4445
4446 amdgpu_ras_inst_query_ras_error_count(adev,
4447 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4448 1,
4449 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4450 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4451 GET_INST(GC, xcc_id),
4452 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4453 &ue_count);
4454 }
4455 }
4456 }
4457
4458 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4459 xcc_id);
4460 mutex_unlock(&adev->grbm_idx_mutex);
4461
4462 /* the caller should make sure err_data->ue_count and
4463 * err_data->ce_count are initialized before use
4464 */
4465 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4466 amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4467 }
4468
4469 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4470 void *ras_error_status, int xcc_id)
4471 {
4472 uint32_t i, j, k;
4473
4474 mutex_lock(&adev->grbm_idx_mutex);
4475
4476 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4477 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4478 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4479 /* no need to select if instance number is 1 */
4480 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4481 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4482 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4483
4484 amdgpu_ras_inst_reset_ras_error_count(adev,
4485 &(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4486 1,
4487 GET_INST(GC, xcc_id));
4488
4489 amdgpu_ras_inst_reset_ras_error_count(adev,
4490 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4491 1,
4492 GET_INST(GC, xcc_id));
4493 }
4494 }
4495 }
4496
4497 /* handle extra register entries of UE */
4498 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4499 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4500 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4501 /* no need to select if instance number is 1 */
4502 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4503 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4504 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4505
4506 amdgpu_ras_inst_reset_ras_error_count(adev,
4507 &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4508 1,
4509 GET_INST(GC, xcc_id));
4510 }
4511 }
4512 }
4513
4514 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4515 xcc_id);
4516 mutex_unlock(&adev->grbm_idx_mutex);
4517 }
4518
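/*
 * Program the SQ watchdog on one XCC. The period comes from the
 * amdgpu_watchdog_timer module parameter; when fatal timeouts are
 * disabled, an out-of-range period is clamped to the 0x23 maximum that
 * PERIOD_SEL accepts. Skipped under SR-IOV, where the host owns the
 * register.
 */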
static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	uint32_t i;
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);

	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
	    (amdgpu_watchdog_timer.period < 1 ||
	     amdgpu_watchdog_timer.period > 0x23)) {
		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
		amdgpu_watchdog_timer.period = 0x23;
	}
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
			     amdgpu_watchdog_timer.period);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
			xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
				  gfx_v9_4_3_inst_query_ras_err_count);
}

static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
}

static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}

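/*
 * Pad the ring with num_nop NOPs. A single NOP is written as-is; longer
 * runs begin with one type-3 NOP header whose count field (up to 0x3ffe)
 * covers the following dwords, letting the CP skip the padding in one
 * fetch rather than decoding each NOP individually.
 */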
static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
	int i;

	/* Header itself is a NOP packet */
	if (num_nop == 1) {
		amdgpu_ring_write(ring, ring->funcs->nop);
		return;
	}

	/* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

	/* Header is at index 0, followed by num_nop - 1 NOP packets */
	for (i = 1; i < num_nop; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

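/*
 * Pretty-print the snapshot captured by gfx_v9_4_3_ip_dump(): the core
 * GC registers for each XCC first, then the CP queue registers for
 * every mec/pipe/queue combination on each XCC.
 */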
static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t i, j, k;
	uint32_t xcc_id, xcc_offset, inst_offset;
	uint32_t num_xcc, reg, num_inst;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);

	if (!adev->gfx.ip_dump_core)
		return;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	drm_printf(p, "Number of Instances:%d\n", num_xcc);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count;
		drm_printf(p, "\nInstance id:%d\n", xcc_id);
		for (i = 0; i < reg_count; i++)
			drm_printf(p, "%-50s \t 0x%08x\n",
				   gc_reg_list_9_4_3[i].reg_name,
				   adev->gfx.ip_dump_core[xcc_offset + i]);
	}

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
		   num_xcc,
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count * num_inst;
		inst_offset = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
					drm_printf(p,
						   "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
						   xcc_id, i, j, k);
					for (reg = 0; reg < reg_count; reg++) {
						drm_printf(p,
							   "%-50s \t 0x%08x\n",
							   gc_cp_reg_list_9_4_3[reg].reg_name,
							   adev->gfx.ip_dump_compute_queues
								[xcc_offset + inst_offset +
								 reg]);
					}
					inst_offset += reg_count;
				}
			}
		}
	}
}

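/*
 * Capture GC state for a devcoredump. GFXOFF is held off around the
 * register reads, and srbm_mutex is taken while soc15_grbm_select()
 * targets each compute queue (me starts at 1 since ME0 is the GFX ME).
 */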
static void gfx_v9_4_3_ip_dump(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t i, j, k;
	uint32_t num_xcc, reg, num_inst;
	uint32_t xcc_id, xcc_offset, inst_offset;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);

	if (!adev->gfx.ip_dump_core)
		return;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	amdgpu_gfx_off_ctrl(adev, false);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count;
		for (i = 0; i < reg_count; i++)
			adev->gfx.ip_dump_core[xcc_offset + i] =
				RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
								   GET_INST(GC, xcc_id)));
	}
	amdgpu_gfx_off_ctrl(adev, true);

	/* dump compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	amdgpu_gfx_off_ctrl(adev, false);
	mutex_lock(&adev->srbm_mutex);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		xcc_offset = xcc_id * reg_count * num_inst;
		inst_offset = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
					/* ME0 is for GFX so start from 1 for CP */
					soc15_grbm_select(adev, 1 + i, j, k, 0,
							  GET_INST(GC, xcc_id));

					for (reg = 0; reg < reg_count; reg++) {
						adev->gfx.ip_dump_compute_queues
							[xcc_offset +
							 inst_offset + reg] =
							RREG32(SOC15_REG_ENTRY_OFFSET_INST(
								gc_cp_reg_list_9_4_3[reg],
								GET_INST(GC, xcc_id)));
					}
					inst_offset += reg_count;
				}
			}
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	amdgpu_gfx_off_ctrl(adev, true);
}

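/*
 * RUN_CLEANER_SHADER carries no payload beyond one reserved dword; it
 * asks the CP to run the cleaner shader set up at init time, which is
 * used with enforce-isolation to scrub leftover shader state between
 * submissions.
 */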
static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
	/* Emit the cleaner shader */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
}

static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
	.name = "gfx_v9_4_3",
	.early_init = gfx_v9_4_3_early_init,
	.late_init = gfx_v9_4_3_late_init,
	.sw_init = gfx_v9_4_3_sw_init,
	.sw_fini = gfx_v9_4_3_sw_fini,
	.hw_init = gfx_v9_4_3_hw_init,
	.hw_fini = gfx_v9_4_3_hw_fini,
	.suspend = gfx_v9_4_3_suspend,
	.resume = gfx_v9_4_3_resume,
	.is_idle = gfx_v9_4_3_is_idle,
	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
	.soft_reset = gfx_v9_4_3_soft_reset,
	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
	.dump_ip_state = gfx_v9_4_3_ip_dump,
	.print_ip_state = gfx_v9_4_3_ip_print,
};

static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
		7 + /* gfx_v9_4_3_emit_mem_sync */
		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
		15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
		2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
	.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
	.emit_fence = gfx_v9_4_3_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.test_ib = gfx_v9_4_3_ring_test_ib,
	.insert_nop = gfx_v9_4_3_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_4_3_ring_soft_recovery,
	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
	.reset = gfx_v9_4_3_reset_kcq,
	.emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
};

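/*
 * Wire up the ring callbacks for every XCC: one KIQ ring per XCC plus
 * num_compute_rings compute rings laid out consecutively per XCC in
 * adev->gfx.compute_ring.
 */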
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;

		for (j = 0; j < adev->gfx.num_compute_rings; j++)
			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
					= &gfx_v9_4_3_ring_funcs_compute;
	}
}

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
	.set = gfx_v9_4_3_set_eop_interrupt_state,
	.process = gfx_v9_4_3_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_reg_fault_state,
	.process = gfx_v9_4_3_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
	.set = gfx_v9_4_3_set_bad_op_fault_state,
	.process = gfx_v9_4_3_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_inst_fault_state,
	.process = gfx_v9_4_3_priv_inst_irq,
};

static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;

	adev->gfx.bad_op_irq.num_types = 1;
	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
}

static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
}

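/*
 * GDS/GWS/OA sizing: GC 9.4.3/9.4.4 carry no GDS internal memory, so
 * only the GWS and OA sizes matter on these parts.
 */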
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
{
	/* init ASIC GDS info */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		/* 9.4.3 removed all the GDS internal memory;
		 * only GWS opcodes such as barrier and semaphore
		 * are supported in the kernel
		 */
		adev->gds.gds_size = 0;
		break;
	default:
		adev->gds.gds_size = 0x10000;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		/* deprecated for 9.4.3, no usage at all */
		adev->gds.gds_compute_max_wave_id = 0;
		break;
	default:
		/* this really depends on the chip */
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

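/*
 * Apply a user-requested CU disable mask (the amdgpu "disable_cu"
 * option) to the currently selected SE/SH through
 * GC_USER_SHADER_ARRAY_CONFIG.
 */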
static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						   u32 bitmap, int xcc_id)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}

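/*
 * A CU is active when it is neither fused off (CC_GC_SHADER_ARRAY_CONFIG)
 * nor user-disabled (GC_USER_SHADER_ARRAY_CONFIG): OR the two inactive
 * masks together, invert, and trim to max_cu_per_sh.
 */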
static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

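/*
 * Fill cu_info for all XCCs. While walking the shader arrays this also
 * checks whether every SE reports the same CU count; if so, CP_CPC_DEBUG
 * is programmed to disable the CPC harvesting relaunch/dispatch handling
 * on that XCC.
 */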
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info)
{
	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
	unsigned disable_masks[4 * 4];
	bool is_symmetric_cus;

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
	 */
	if (adev->gfx.config.max_shader_engines *
		adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		is_symmetric_cus = true;
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				mask = 1;
				ao_bitmap = 0;
				counter = 0;
				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v9_4_3_set_user_cu_inactive_bitmap(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);

				cu_info->bitmap[xcc_id][i][j] = bitmap;

				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
					if (bitmap & mask) {
						if (counter < adev->gfx.config.max_cu_per_sh)
							ao_bitmap |= mask;
						counter++;
					}
					mask <<= 1;
				}
				active_cu_number += counter;
				if (i < 2 && j < 2)
					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
			}
			if (i && is_symmetric_cus && prev_counter != counter)
				is_symmetric_cus = false;
			prev_counter = counter;
		}
		if (is_symmetric_cus) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
		}
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
	}
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 4,
	.rev = 3,
	.funcs = &gfx_v9_4_3_ip_funcs,
};

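/*
 * Partition-scoped resume used by the XCP layer: re-init constants,
 * then bring RLC (bare metal only) and CP back up on just the XCCs
 * selected by inst_mask.
 */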
static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp_mask;
	int i, r;

	/* TODO : Initialize golden regs */
	/* gfx_v9_4_3_init_golden_registers(adev); */

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask)
		gfx_v9_4_3_xcc_constants_init(adev, i);

	if (!amdgpu_sriov_vf(adev)) {
		tmp_mask = inst_mask;
		for_each_inst(i, tmp_mask) {
			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
			if (r)
				return r;
		}
	}

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for_each_inst(i, inst_mask)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
	.suspend = &gfx_v9_4_3_xcp_suspend,
	.resume = &gfx_v9_4_3_xcp_resume
};

struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};

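/*
 * GFX RAS late init: run the common RAS block init, then bind the GFX
 * block to its ACA handle; on bind failure the block init is unwound
 * via amdgpu_ras_block_late_fini().
 */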
static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
				&gfx_v9_4_3_aca_info,
				NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
	.ras_block = {
		.hw_ops = &gfx_v9_4_3_ras_ops,
		.ras_late_init = &gfx_v9_4_3_ras_late_init,
	},
	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};