xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c (revision 06103dccbbd29408255a409f6f98f7f02387dc93)
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define XCC_REG_RANGE_0_LOW  0x2000     /* XCC gfxdec0 lower Bound */
#define XCC_REG_RANGE_0_HIGH 0x3400     /* XCC gfxdec0 upper Bound */
#define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower Bound */
#define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper Bound */

#define NORMALIZE_XCC_REG_OFFSET(offset) \
	(offset & 0xFFFF)

static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
};

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

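/*
 * Ask the KIQ to take ownership of the compute queues described by
 * @queue_mask and hand the cleaner shader MC address to CP firmware
 * via a PM4 SET_RESOURCES packet.
 */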
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask: 0, queue_type: 0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

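/*
 * Emit a PM4 MAP_QUEUES packet on the KIQ ring so CP firmware maps
 * @ring's hardware queue, using the ring's MQD address, doorbell
 * offset and wptr polling address.
 */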
static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

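/*
 * Emit a PM4 UNMAP_QUEUES packet for @ring. For PREEMPT_QUEUES_NO_UNMAP
 * the trailing dwords carry the GPU address and sequence value the CP
 * uses to signal completion; otherwise they are zeroed.
 */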
static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

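/*
 * Emit a PM4 QUERY_STATUS packet for @ring's queue; CP firmware writes
 * the fence value @seq back to @addr when the query completes.
 */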
static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

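/* Emit a PM4 INVALIDATE_TLBS packet for @pasid via the KIQ. */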
static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

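/*
 * Reset a compute hardware queue directly through register writes:
 * select the queue via SRBM under RLC safe mode, request a dequeue and
 * queue reset, then poll CP_HQD_ACTIVE until the HQD goes idle.
 */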
static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					  uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait till dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait on hqd deactivate\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

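/* Apply golden register settings to every XCC instance. */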
static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
		} else {
			/* Golden settings applied by driver for ASIC with rev_id 0 */
			if (adev->rev_id == 0) {
				WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
						      REDUCE_FIFO_DEPTH_BY_2, 2);
			} else {
				WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
						      SPARE, 0x1);
			}
		}
	}
}

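/*
 * Registers in the XCC gfxdec0/gfxdec1 ranges are per-XCC; keeping only
 * the lower 16 bits of the offset makes a packet-based access hit the
 * local XCC instance.
 */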
static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/*
	 * If it is an XCC reg, normalize the reg to keep the
	 * lower 16 bits in the local xcc.
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
		((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

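/*
 * Emit a PM4 WAIT_REG_MEM packet that polls a register or memory
 * location until the masked value equals @ref, rechecking at the
 * @inv poll interval.
 */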
static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	/* Only do the normalization on regspace */
	if (mem_space == 0) {
		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
				 /* memory (1) or register (0) */
				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
				 WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

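/*
 * Basic ring sanity test: write 0xDEADBEEF to SCRATCH_REG0 through the
 * ring and poll the register until the value lands or we time out.
 */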
static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

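/*
 * Indirect buffer test: submit a small IB that writes 0xDEADBEEF to a
 * writeback slot, wait on the fence, then verify the value arrived.
 */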
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
	return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	int err;

	if (amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_sjt_mec.bin", chip_name);

		if (err)
			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
							AMDGPU_UCODE_REQUIRED,
							"amdgpu/%s_mec.bin", chip_name);
	} else
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_mec.bin", chip_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

	gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

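/*
 * Allocate and zero the MEC HPD EOP buffer for all enabled compute
 * queues, then copy the MEC microcode into a GTT buffer object.
 */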
static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

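/*
 * Program GRBM_GFX_INDEX to steer register access to a specific
 * SE/SH/instance on one XCC; an argument of 0xffffffff selects
 * broadcast writes for that field.
 */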
static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id,
			      uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	u32 xcp_ctl;

	/* Value is expected to be the same on all, fetch from first instance */
	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}

static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
						int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
						    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

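/*
 * Map an IH node id to a logical XCC index by counting the enabled
 * bits in the XCC mask up to ih_node / 2; returns -EINVAL if no
 * mapping exists.
 */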
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
};

static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
				      struct aca_bank *bank, enum aca_smu_type type,
				      void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	u32 instlo;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* NOTE: overwrite info.die_id with xcd id for gfx */
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];

	switch (type) {
	case ACA_SMU_TYPE_UE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_UE, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

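/*
 * Initialize one compute ring: derive its doorbell index and EOP
 * buffer address from the XCC id and ring id, then register the ring
 * with the EOP interrupt source.
 */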
static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				        int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
				     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
			ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
	uint32_t *ptr, num_xcc, inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}
}

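/*
 * Software init: set up cleaner shader state, IRQ sources, RLC and MEC
 * buffer objects, compute/KIQ rings and MQDs for every XCC, then the
 * RAS, sysfs and IP-dump helpers.
 */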
static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 153) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
								       ring_id,
								       xcc_id,
								       i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
				sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	adev->gfx.compute_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		if (adev->gfx.mec_fw_version >= 155) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	default:
		break;
	}
	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	gfx_v9_4_3_alloc_ip_dump(adev);

	return 0;
}

static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	amdgpu_gfx_cleaner_shader_sw_fini(adev);

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	amdgpu_gfx_sysfs_fini(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list has been workable since v2_1,
	 * and it's needed by the gfxoff feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

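/*
 * Request RLC safe mode by writing the CMD and MESSAGE fields of
 * RLC_SAFE_MODE, then poll until the RLC acknowledges by clearing CMD.
 */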
gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device * adev,int xcc_id)1399 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
1400 {
1401 	uint32_t data;
1402 	unsigned i;
1403 
1404 	data = RLC_SAFE_MODE__CMD_MASK;
1405 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
1406 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1407 
1408 	/* wait for RLC_SAFE_MODE */
1409 	for (i = 0; i < adev->usec_timeout; i++) {
1410 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1411 			break;
1412 		udelay(1);
1413 	}
1414 }
1415 
gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device * adev,int xcc_id)1416 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
1417 					   int xcc_id)
1418 {
1419 	uint32_t data;
1420 
1421 	data = RLC_SAFE_MODE__CMD_MASK;
1422 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1423 }
1424 
gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device * adev)1425 static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1426 {
1427 	int xcc_id, num_xcc;
1428 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1429 
1430 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1431 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1432 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
1433 		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
1434 		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
1435 		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
1436 		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
1437 		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
1438 		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
1439 		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
1440 	}
1441 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1442 }
1443 
gfx_v9_4_3_rlc_init(struct amdgpu_device * adev)1444 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
1445 {
1446 	/* init spm vmid with 0xf */
1447 	if (adev->gfx.rlc.funcs->update_spm_vmid)
1448 		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
1449 
1450 	return 0;
1451 }
1452 
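/**
 * gfx_v9_4_3_xcc_wait_for_rlc_serdes - wait for RLC serdes masters to idle
 * @adev: amdgpu device pointer
 * @xcc_id: logical XCC instance to poll
 *
 * Polls RLC_SERDES_CU_MASTER_BUSY per SE/SH under grbm_idx_mutex, then the
 * non-CU master busy bits (SE/GC/TC0/TC1) on the broadcast index. Logs a
 * message and bails out if a CU master does not go idle within
 * adev->usec_timeout.
 */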
1453 static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
1454 					       int xcc_id)
1455 {
1456 	u32 i, j, k;
1457 	u32 mask;
1458 
1459 	mutex_lock(&adev->grbm_idx_mutex);
1460 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1461 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1462 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
1463 						    xcc_id);
1464 			for (k = 0; k < adev->usec_timeout; k++) {
1465 				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
1466 					break;
1467 				udelay(1);
1468 			}
1469 			if (k == adev->usec_timeout) {
1470 				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
1471 							    0xffffffff,
1472 							    0xffffffff, xcc_id);
1473 				mutex_unlock(&adev->grbm_idx_mutex);
1474 				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1475 					 i, j);
1476 				return;
1477 			}
1478 		}
1479 	}
1480 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
1481 				    xcc_id);
1482 	mutex_unlock(&adev->grbm_idx_mutex);
1483 
1484 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1485 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1486 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1487 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1488 	for (k = 0; k < adev->usec_timeout; k++) {
1489 		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1490 			break;
1491 		udelay(1);
1492 	}
1493 }
1494 
1495 static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1496 						     bool enable, int xcc_id)
1497 {
1498 	u32 tmp;
1499 
1500 	/* These interrupts should be enabled to drive DS clock */
1501 
1502 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1503 
1504 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1505 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1506 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1507 
1508 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1509 }
1510 
1511 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1512 {
1513 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1514 			      RLC_ENABLE_F32, 0);
1515 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1516 	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1517 }
1518 
1519 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1520 {
1521 	int i, num_xcc;
1522 
1523 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1524 	for (i = 0; i < num_xcc; i++)
1525 		gfx_v9_4_3_xcc_rlc_stop(adev, i);
1526 }
1527 
1528 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1529 {
1530 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1531 			      SOFT_RESET_RLC, 1);
1532 	udelay(50);
1533 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1534 			      SOFT_RESET_RLC, 0);
1535 	udelay(50);
1536 }
1537 
1538 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1539 {
1540 	int i, num_xcc;
1541 
1542 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1543 	for (i = 0; i < num_xcc; i++)
1544 		gfx_v9_4_3_xcc_rlc_reset(adev, i);
1545 }
1546 
1547 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1548 {
1549 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1550 			      RLC_ENABLE_F32, 1);
1551 	udelay(50);
1552 
1553 	/* carrizo enables the cp interrupt only after cp is initialized */
1554 	if (!(adev->flags & AMD_IS_APU)) {
1555 		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1556 		udelay(50);
1557 	}
1558 }
1559 
1560 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1561 {
1562 #ifdef AMDGPU_RLC_DEBUG_RETRY
1563 	u32 rlc_ucode_ver;
1564 #endif
1565 	int i, num_xcc;
1566 
1567 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1568 	for (i = 0; i < num_xcc; i++) {
1569 		gfx_v9_4_3_xcc_rlc_start(adev, i);
1570 #ifdef AMDGPU_RLC_DEBUG_RETRY
1571 		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
1572 		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1573 		if (rlc_ucode_ver == 0x108) {
1574 			dev_info(adev->dev,
1575 				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
1576 				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1577 			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1578 			 * default is 0x9C4 to create a 100us interval */
1579 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1580 			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1581 			 * to disable the page fault retry interrupts, default is
1582 			 * 0x100 (256) */
1583 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1584 		}
1585 #endif
1586 	}
1587 }
1588 
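/**
 * gfx_v9_4_3_xcc_rlc_load_microcode - load RLC ucode via MMIO (legacy path)
 * @adev: amdgpu device pointer
 * @xcc_id: logical XCC instance to program
 *
 * Streams the RLC firmware image dword by dword through the
 * RLC_GPM_UCODE_ADDR/DATA register pair, starting at
 * RLCG_UCODE_LOADING_START_ADDRESS, and finishes by writing the firmware
 * version to the address register. Returns -EINVAL if no RLC fw is loaded.
 */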
1589 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1590 					     int xcc_id)
1591 {
1592 	const struct rlc_firmware_header_v2_0 *hdr;
1593 	const __le32 *fw_data;
1594 	unsigned i, fw_size;
1595 
1596 	if (!adev->gfx.rlc_fw)
1597 		return -EINVAL;
1598 
1599 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1600 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1601 
1602 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1603 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1604 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1605 
1606 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1607 			RLCG_UCODE_LOADING_START_ADDRESS);
1608 	for (i = 0; i < fw_size; i++) {
1609 		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1610 			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
1611 			msleep(1);
1612 		}
1613 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1614 	}
1615 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1616 
1617 	return 0;
1618 }
1619 
1620 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1621 {
1622 	int r;
1623 
1624 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1625 		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1626 		/* legacy rlc firmware loading */
1627 		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1628 		if (r)
1629 			return r;
1630 		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1631 	}
1632 
1633 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1634 	/* disable CG */
1635 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1636 	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1637 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1638 
1639 	return 0;
1640 }
1641 
1642 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1643 {
1644 	int r, i, num_xcc;
1645 
1646 	if (amdgpu_sriov_vf(adev))
1647 		return 0;
1648 
1649 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1650 	for (i = 0; i < num_xcc; i++) {
1651 		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1652 		if (r)
1653 			return r;
1654 	}
1655 
1656 	return 0;
1657 }
1658 
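/**
 * gfx_v9_4_3_update_spm_vmid - select the VMID used for SPM streaming
 * @adev: amdgpu device pointer
 * @ring: unused by this implementation, may be NULL
 * @vmid: VMID to program into RLC_SPM_MC_CNTL
 *
 * Read-modify-writes the RLC_SPM_VMID field, using the NO_KIQ register
 * accessors on an SRIOV one-VF configuration outside of runtime, and only
 * writes the register back when the value actually changed.
 */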
1659 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1660 				       unsigned vmid)
1661 {
1662 	u32 reg, pre_data, data;
1663 
1664 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
1665 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
1666 		pre_data = RREG32_NO_KIQ(reg);
1667 	else
1668 		pre_data = RREG32(reg);
1669 
1670 	data =	pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
1671 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
1672 
1673 	if (pre_data != data) {
1674 		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
1675 			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1676 		} else
1677 			WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1678 	}
1679 }
1680 
1681 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1682 	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1683 	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1684 };
1685 
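/* gfx_v9_4_3_check_rlcg_range: translates each table entry's logical
 * instance to a device instance (when an ip_map hook is present) and
 * compares the resulting absolute register offset against @offset; used
 * to decide whether an access must go through the RLCG path.
 */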
1686 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1687 					uint32_t offset,
1688 					struct soc15_reg_rlcg *entries, int arr_size)
1689 {
1690 	int i, inst;
1691 	uint32_t reg;
1692 
1693 	if (!entries)
1694 		return false;
1695 
1696 	for (i = 0; i < arr_size; i++) {
1697 		const struct soc15_reg_rlcg *entry;
1698 
1699 		entry = &entries[i];
1700 		inst = adev->ip_map.logical_to_dev_inst ?
1701 			       adev->ip_map.logical_to_dev_inst(
1702 				       adev, entry->hwip, entry->instance) :
1703 			       entry->instance;
1704 		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1705 		      entry->reg;
1706 		if (offset == reg)
1707 			return true;
1708 	}
1709 
1710 	return false;
1711 }
1712 
1713 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1714 {
1715 	return gfx_v9_4_3_check_rlcg_range(adev, offset,
1716 					(void *)rlcg_access_gc_9_4_3,
1717 					ARRAY_SIZE(rlcg_access_gc_9_4_3));
1718 }
1719 
1720 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1721 					     bool enable, int xcc_id)
1722 {
1723 	if (enable) {
1724 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1725 	} else {
1726 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1727 			(CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
1728 			 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
1729 			 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
1730 			 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
1731 			 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
1732 			 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
1733 			 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
1734 			 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
1735 			 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1736 		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1737 	}
1738 	udelay(50);
1739 }
1740 
1741 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1742 						    int xcc_id)
1743 {
1744 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1745 	const __le32 *fw_data;
1746 	unsigned i;
1747 	u32 tmp;
1748 	u32 mec_ucode_addr_offset;
1749 	u32 mec_ucode_data_offset;
1750 
1751 	if (!adev->gfx.mec_fw)
1752 		return -EINVAL;
1753 
1754 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1755 
1756 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1757 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1758 
1759 	fw_data = (const __le32 *)
1760 		(adev->gfx.mec_fw->data +
1761 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1762 	tmp = 0;
1763 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1764 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1765 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1766 
1767 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1768 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1769 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1770 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1771 
1772 	mec_ucode_addr_offset =
1773 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1774 	mec_ucode_data_offset =
1775 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1776 
1777 	/* MEC1 */
1778 	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1779 	for (i = 0; i < mec_hdr->jt_size; i++)
1780 		WREG32(mec_ucode_data_offset,
1781 		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
1782 
1783 	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
1784 	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
1785 
1786 	return 0;
1787 }
1788 
1789 /* KIQ functions */
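/* gfx_v9_4_3_xcc_kiq_setting: registers the KIQ with the RLC scheduler by
 * packing me/pipe/queue into the low byte of RLC_CP_SCHEDULERS; bit 7
 * (the 0x80 OR-ed in below) appears to flag the entry as valid.
 */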
1790 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1791 {
1792 	uint32_t tmp;
1793 	struct amdgpu_device *adev = ring->adev;
1794 
1795 	/* tell RLC which queue is the KIQ */
1796 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1797 	tmp &= 0xffffff00;
1798 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1799 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
1800 }
1801 
1802 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1803 {
1804 	struct amdgpu_device *adev = ring->adev;
1805 
1806 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1807 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1808 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1809 			mqd->cp_hqd_queue_priority =
1810 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1811 		}
1812 	}
1813 }
1814 
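/**
 * gfx_v9_4_3_xcc_mqd_init - fill the v9 memory queue descriptor for a ring
 * @ring: ring whose mqd_ptr is populated
 * @xcc_id: logical XCC instance the queue lives on
 *
 * Builds the MQD in place: EOP buffer base/size, doorbell control, MQD and
 * PQ base addresses, rptr/wptr report addresses, priority and persistent
 * state. Only the KIQ sets cp_hqd_active here, since regular compute
 * queues are activated later through map_queues packets.
 */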
1815 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1816 {
1817 	struct amdgpu_device *adev = ring->adev;
1818 	struct v9_mqd *mqd = ring->mqd_ptr;
1819 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1820 	uint32_t tmp;
1821 
1822 	mqd->header = 0xC0310800;
1823 	mqd->compute_pipelinestat_enable = 0x00000001;
1824 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1825 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1826 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1827 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1828 	mqd->compute_misc_reserved = 0x00000003;
1829 
1830 	mqd->dynamic_cu_mask_addr_lo =
1831 		lower_32_bits(ring->mqd_gpu_addr
1832 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1833 	mqd->dynamic_cu_mask_addr_hi =
1834 		upper_32_bits(ring->mqd_gpu_addr
1835 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1836 
1837 	eop_base_addr = ring->eop_gpu_addr >> 8;
1838 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1839 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1840 
1841 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1842 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1843 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1844 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
1845 
1846 	mqd->cp_hqd_eop_control = tmp;
1847 
1848 	/* enable doorbell? */
1849 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1850 
1851 	if (ring->use_doorbell) {
1852 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1853 				    DOORBELL_OFFSET, ring->doorbell_index);
1854 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1855 				    DOORBELL_EN, 1);
1856 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1857 				    DOORBELL_SOURCE, 0);
1858 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1859 				    DOORBELL_HIT, 0);
1860 		if (amdgpu_sriov_vf(adev))
1861 			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1862 					    DOORBELL_MODE, 1);
1863 	} else {
1864 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1865 					 DOORBELL_EN, 0);
1866 	}
1867 
1868 	mqd->cp_hqd_pq_doorbell_control = tmp;
1869 
1870 	/* disable the queue if it's active */
1871 	ring->wptr = 0;
1872 	mqd->cp_hqd_dequeue_request = 0;
1873 	mqd->cp_hqd_pq_rptr = 0;
1874 	mqd->cp_hqd_pq_wptr_lo = 0;
1875 	mqd->cp_hqd_pq_wptr_hi = 0;
1876 
1877 	/* set the pointer to the MQD */
1878 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1879 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1880 
1881 	/* set MQD vmid to 0 */
1882 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1883 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1884 	mqd->cp_mqd_control = tmp;
1885 
1886 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1887 	hqd_gpu_addr = ring->gpu_addr >> 8;
1888 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1889 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1890 
1891 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1892 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1893 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1894 			    (order_base_2(ring->ring_size / 4) - 1));
1895 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1896 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
1897 #ifdef __BIG_ENDIAN
1898 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1899 #endif
1900 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1901 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1902 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1903 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1904 	mqd->cp_hqd_pq_control = tmp;
1905 
1906 	/* set the wb address whether it's enabled or not */
1907 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1908 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1909 	mqd->cp_hqd_pq_rptr_report_addr_hi =
1910 		upper_32_bits(wb_gpu_addr) & 0xffff;
1911 
1912 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1913 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1914 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1915 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1916 
1917 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1918 	ring->wptr = 0;
1919 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1920 
1921 	/* set the vmid for the queue */
1922 	mqd->cp_hqd_vmid = 0;
1923 
1924 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1925 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1926 	mqd->cp_hqd_persistent_state = tmp;
1927 
1928 	/* set MIN_IB_AVAIL_SIZE */
1929 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1930 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1931 	mqd->cp_hqd_ib_control = tmp;
1932 
1933 	/* set static priority for a queue/ring */
1934 	gfx_v9_4_3_mqd_set_priority(ring, mqd);
1935 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1936 
1937 	/* the map_queues packet doesn't need to activate the queue,
1938 	 * so only the kiq needs to set this field.
1939 	 */
1940 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1941 		mqd->cp_hqd_active = 1;
1942 
1943 	return 0;
1944 }
1945 
1946 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1947 					    int xcc_id)
1948 {
1949 	struct amdgpu_device *adev = ring->adev;
1950 	struct v9_mqd *mqd = ring->mqd_ptr;
1951 	int j;
1952 
1953 	/* disable wptr polling */
1954 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1955 
1956 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1957 	       mqd->cp_hqd_eop_base_addr_lo);
1958 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1959 	       mqd->cp_hqd_eop_base_addr_hi);
1960 
1961 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1962 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1963 	       mqd->cp_hqd_eop_control);
1964 
1965 	/* enable doorbell? */
1966 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1967 	       mqd->cp_hqd_pq_doorbell_control);
1968 
1969 	/* disable the queue if it's active */
1970 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1971 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1972 		for (j = 0; j < adev->usec_timeout; j++) {
1973 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1974 				break;
1975 			udelay(1);
1976 		}
1977 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1978 		       mqd->cp_hqd_dequeue_request);
1979 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1980 		       mqd->cp_hqd_pq_rptr);
1981 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1982 		       mqd->cp_hqd_pq_wptr_lo);
1983 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1984 		       mqd->cp_hqd_pq_wptr_hi);
1985 	}
1986 
1987 	/* set the pointer to the MQD */
1988 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1989 	       mqd->cp_mqd_base_addr_lo);
1990 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1991 	       mqd->cp_mqd_base_addr_hi);
1992 
1993 	/* set MQD vmid to 0 */
1994 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1995 	       mqd->cp_mqd_control);
1996 
1997 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1998 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1999 	       mqd->cp_hqd_pq_base_lo);
2000 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2001 	       mqd->cp_hqd_pq_base_hi);
2002 
2003 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2004 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2005 	       mqd->cp_hqd_pq_control);
2006 
2007 	/* set the wb address whether it's enabled or not */
2008 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2009 				mqd->cp_hqd_pq_rptr_report_addr_lo);
2010 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2011 				mqd->cp_hqd_pq_rptr_report_addr_hi);
2012 
2013 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2014 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2015 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2016 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2017 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2018 
2019 	/* enable the doorbell if requested */
2020 	if (ring->use_doorbell) {
2021 		WREG32_SOC15(
2022 			GC, GET_INST(GC, xcc_id),
2023 			regCP_MEC_DOORBELL_RANGE_LOWER,
2024 			((adev->doorbell_index.kiq +
2025 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2026 			 2) << 2);
2027 		WREG32_SOC15(
2028 			GC, GET_INST(GC, xcc_id),
2029 			regCP_MEC_DOORBELL_RANGE_UPPER,
2030 			((adev->doorbell_index.userqueue_end +
2031 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2032 			 2) << 2);
2033 	}
2034 
2035 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2036 	       mqd->cp_hqd_pq_doorbell_control);
2037 
2038 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2039 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2040 	       mqd->cp_hqd_pq_wptr_lo);
2041 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2042 	       mqd->cp_hqd_pq_wptr_hi);
2043 
2044 	/* set the vmid for the queue */
2045 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2046 
2047 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2048 	       mqd->cp_hqd_persistent_state);
2049 
2050 	/* activate the queue */
2051 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2052 	       mqd->cp_hqd_active);
2053 
2054 	if (ring->use_doorbell)
2055 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2056 
2057 	return 0;
2058 }
2059 
2060 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
2061 					    int xcc_id)
2062 {
2063 	struct amdgpu_device *adev = ring->adev;
2064 	int j;
2065 
2066 	/* disable the queue if it's active */
2067 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2068 
2069 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2070 
2071 		for (j = 0; j < adev->usec_timeout; j++) {
2072 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2073 				break;
2074 			udelay(1);
2075 		}
2076 
2077 		if (j == adev->usec_timeout) {
2078 			DRM_DEBUG("%s dequeue request failed.\n", ring->name);
2079 
2080 			/* Manual disable if dequeue request times out */
2081 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2082 		}
2083 
2084 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2085 		      0);
2086 	}
2087 
2088 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
2089 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
2090 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
2091 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2092 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
2093 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
2094 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
2095 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
2096 
2097 	return 0;
2098 }
2099 
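/* gfx_v9_4_3_xcc_kiq_init_queue: during a GPU reset the MQD is restored
 * from the backup copy and only the HQD registers are reprogrammed; on a
 * clean init the MQD is built from scratch, registered with the RLC, and
 * then backed up for later resets.
 */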
2100 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
2101 {
2102 	struct amdgpu_device *adev = ring->adev;
2103 	struct v9_mqd *mqd = ring->mqd_ptr;
2104 	struct v9_mqd *tmp_mqd;
2105 
2106 	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
2107 
2108 	/* The GPU could be in a bad state during probe, in which case the
2109 	 * driver triggers a reset after loading the SMU. The mqd has not
2110 	 * been initialized in that case, so the driver needs to re-init it.
2111 	 * Check mqd->cp_hqd_pq_control, since this value should not be 0.
2112 	 */
2113 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
2114 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
2115 		/* for the GPU_RESET case, reset the MQD to a clean state */
2116 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2117 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
2118 
2119 		/* reset ring buffer */
2120 		ring->wptr = 0;
2121 		amdgpu_ring_clear_ring(ring);
2122 		mutex_lock(&adev->srbm_mutex);
2123 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2124 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2125 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2126 		mutex_unlock(&adev->srbm_mutex);
2127 	} else {
2128 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2129 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2130 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2131 		mutex_lock(&adev->srbm_mutex);
2132 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2133 			amdgpu_ring_clear_ring(ring);
2134 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2135 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2136 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2137 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2138 		mutex_unlock(&adev->srbm_mutex);
2139 
2140 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2141 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
2142 	}
2143 
2144 	return 0;
2145 }
2146 
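/* gfx_v9_4_3_xcc_kcq_init_queue: unlike the KIQ path, compute queue MQDs
 * are only built (or restored) here; the HQD programming happens later via
 * the KIQ map_queues path in amdgpu_gfx_enable_kcq().
 */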
2147 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
2148 {
2149 	struct amdgpu_device *adev = ring->adev;
2150 	struct v9_mqd *mqd = ring->mqd_ptr;
2151 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2152 	struct v9_mqd *tmp_mqd;
2153 
2154 	/* Same as the kiq init above: the driver needs to re-init the mqd
2155 	 * if mqd->cp_hqd_pq_control has not been initialized before.
2156 	 */
2157 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
2158 
2159 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
2160 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
2161 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2162 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2163 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2164 		mutex_lock(&adev->srbm_mutex);
2165 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2166 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2167 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2168 		mutex_unlock(&adev->srbm_mutex);
2169 
2170 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2171 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2172 	} else {
2173 		/* restore MQD to a clean status */
2174 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2175 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2176 		/* reset ring buffer */
2177 		ring->wptr = 0;
2178 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
2179 		amdgpu_ring_clear_ring(ring);
2180 	}
2181 
2182 	return 0;
2183 }
2184 
2185 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
2186 {
2187 	struct amdgpu_ring *ring;
2188 	int j;
2189 
2190 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2191 		ring = &adev->gfx.compute_ring[j +  xcc_id * adev->gfx.num_compute_rings];
2192 		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2193 			mutex_lock(&adev->srbm_mutex);
2194 			soc15_grbm_select(adev, ring->me,
2195 					ring->pipe,
2196 					ring->queue, 0, GET_INST(GC, xcc_id));
2197 			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
2198 			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2199 			mutex_unlock(&adev->srbm_mutex);
2200 		}
2201 	}
2202 
2203 	return 0;
2204 }
2205 
2206 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
2207 {
2208 	struct amdgpu_ring *ring;
2209 	int r;
2210 
2211 	ring = &adev->gfx.kiq[xcc_id].ring;
2212 
2213 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2214 	if (unlikely(r != 0))
2215 		return r;
2216 
2217 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2218 	if (unlikely(r != 0)) {
2219 		amdgpu_bo_unreserve(ring->mqd_obj);
2220 		return r;
2221 	}
2222 
2223 	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
2224 	amdgpu_bo_kunmap(ring->mqd_obj);
2225 	ring->mqd_ptr = NULL;
2226 	amdgpu_bo_unreserve(ring->mqd_obj);
2227 	return 0;
2228 }
2229 
2230 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
2231 {
2232 	struct amdgpu_ring *ring = NULL;
2233 	int r = 0, i;
2234 
2235 	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
2236 
2237 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2238 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2239 
2240 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2241 		if (unlikely(r != 0))
2242 			goto done;
2243 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2244 		if (!r) {
2245 			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
2246 			amdgpu_bo_kunmap(ring->mqd_obj);
2247 			ring->mqd_ptr = NULL;
2248 		}
2249 		amdgpu_bo_unreserve(ring->mqd_obj);
2250 		if (r)
2251 			goto done;
2252 	}
2253 
2254 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2255 done:
2256 	return r;
2257 }
2258 
2259 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
2260 {
2261 	struct amdgpu_ring *ring;
2262 	int r, j;
2263 
2264 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2265 
2266 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2267 		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
2268 
2269 		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
2270 		if (r)
2271 			return r;
2272 	} else {
2273 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2274 	}
2275 
2276 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
2277 	if (r)
2278 		return r;
2279 
2280 	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
2281 	if (r)
2282 		return r;
2283 
2284 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2285 		ring = &adev->gfx.compute_ring
2286 				[j + xcc_id * adev->gfx.num_compute_rings];
2287 		r = amdgpu_ring_test_helper(ring);
2288 		if (r)
2289 			return r;
2290 	}
2291 
2292 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2293 
2294 	return 0;
2295 }
2296 
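/* gfx_v9_4_3_cp_resume: on SRIOV the partition mode is queried from the
 * host and amdgpu_xcp_init() is called with the derived XCP count; on bare
 * metal an unknown partition mode triggers a switch to the user-requested
 * mode. Afterwards the CP is resumed on every XCC in sequence.
 */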
2297 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2298 {
2299 	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
2300 
2301 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2302 	if (amdgpu_sriov_vf(adev)) {
2303 		enum amdgpu_gfx_partition mode;
2304 
2305 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2306 						       AMDGPU_XCP_FL_NONE);
2307 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2308 			return -EINVAL;
2309 		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
2310 		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2311 		num_xcp = num_xcc / num_xcc_per_xcp;
2312 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2313 
2314 	} else {
2315 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2316 						    AMDGPU_XCP_FL_NONE) ==
2317 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2318 			r = amdgpu_xcp_switch_partition_mode(
2319 				adev->xcp_mgr, amdgpu_user_partt_mode);
2320 	}
2321 	if (r)
2322 		return r;
2323 
2324 	for (i = 0; i < num_xcc; i++) {
2325 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2326 		if (r)
2327 			return r;
2328 	}
2329 
2330 	return 0;
2331 }
2332 
2333 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2334 {
2335 	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2336 		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2337 
2338 	if (amdgpu_sriov_vf(adev)) {
2339 		/* Polling must be disabled for SRIOV once the hw has finished;
2340 		 * otherwise the CPC engine may keep fetching a WB address that
2341 		 * is already invalid after the sw teardown, triggering a DMAR
2342 		 * read error on the hypervisor side.
2343 		 */
2344 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2345 		return;
2346 	}
2347 
2348 	/* Use the deinitialize sequence from CAIL when unbinding the device
2349 	 * from the driver, otherwise the KIQ hangs when binding it back.
2350 	 */
2351 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2352 		mutex_lock(&adev->srbm_mutex);
2353 		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2354 				  adev->gfx.kiq[xcc_id].ring.pipe,
2355 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
2356 				  GET_INST(GC, xcc_id));
2357 		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2358 						 xcc_id);
2359 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2360 		mutex_unlock(&adev->srbm_mutex);
2361 	}
2362 
2363 	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2364 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2365 }
2366 
2367 static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
2368 {
2369 	int r;
2370 	struct amdgpu_device *adev = ip_block->adev;
2371 
2372 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
2373 				       adev->gfx.cleaner_shader_ptr);
2374 
2375 	if (!amdgpu_sriov_vf(adev))
2376 		gfx_v9_4_3_init_golden_registers(adev);
2377 
2378 	gfx_v9_4_3_constants_init(adev);
2379 
2380 	r = adev->gfx.rlc.funcs->resume(adev);
2381 	if (r)
2382 		return r;
2383 
2384 	r = gfx_v9_4_3_cp_resume(adev);
2385 	if (r)
2386 		return r;
2387 
2388 	return r;
2389 }
2390 
2391 static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
2392 {
2393 	struct amdgpu_device *adev = ip_block->adev;
2394 	int i, num_xcc;
2395 
2396 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2397 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2398 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
2399 
2400 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2401 	for (i = 0; i < num_xcc; i++) {
2402 		gfx_v9_4_3_xcc_fini(adev, i);
2403 	}
2404 
2405 	return 0;
2406 }
2407 
2408 static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
2409 {
2410 	return gfx_v9_4_3_hw_fini(ip_block);
2411 }
2412 
2413 static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
2414 {
2415 	return gfx_v9_4_3_hw_init(ip_block);
2416 }
2417 
2418 static bool gfx_v9_4_3_is_idle(void *handle)
2419 {
2420 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2421 	int i, num_xcc;
2422 
2423 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2424 	for (i = 0; i < num_xcc; i++) {
2425 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2426 					GRBM_STATUS, GUI_ACTIVE))
2427 			return false;
2428 	}
2429 	return true;
2430 }
2431 
2432 static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
2433 {
2434 	unsigned i;
2435 	struct amdgpu_device *adev = ip_block->adev;
2436 
2437 	for (i = 0; i < adev->usec_timeout; i++) {
2438 		if (gfx_v9_4_3_is_idle(adev))
2439 			return 0;
2440 		udelay(1);
2441 	}
2442 	return -ETIMEDOUT;
2443 }
2444 
2445 static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
2446 {
2447 	u32 grbm_soft_reset = 0;
2448 	u32 tmp;
2449 	struct amdgpu_device *adev = ip_block->adev;
2450 
2451 	/* GRBM_STATUS */
2452 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2453 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2454 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2455 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2456 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2457 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2458 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2459 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2460 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2461 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2462 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2463 	}
2464 
2465 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2466 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2467 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2468 	}
2469 
2470 	/* GRBM_STATUS2 */
2471 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2472 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2473 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2474 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2475 
2476 
2477 	if (grbm_soft_reset) {
2478 		/* stop the rlc */
2479 		adev->gfx.rlc.funcs->stop(adev);
2480 
2481 		/* Disable MEC parsing/prefetching */
2482 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2483 
2484 		if (grbm_soft_reset) {
2485 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2486 			tmp |= grbm_soft_reset;
2487 			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2488 			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2489 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2490 
2491 			udelay(50);
2492 
2493 			tmp &= ~grbm_soft_reset;
2494 			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2495 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2496 		}
2497 
2498 		/* Wait a little for things to settle down */
2499 		udelay(50);
2500 	}
2501 	return 0;
2502 }
2503 
2504 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2505 					  uint32_t vmid,
2506 					  uint32_t gds_base, uint32_t gds_size,
2507 					  uint32_t gws_base, uint32_t gws_size,
2508 					  uint32_t oa_base, uint32_t oa_size)
2509 {
2510 	struct amdgpu_device *adev = ring->adev;
2511 
2512 	/* GDS Base */
2513 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2514 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2515 				   gds_base);
2516 
2517 	/* GDS Size */
2518 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2519 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2520 				   gds_size);
2521 
2522 	/* GWS */
2523 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2524 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2525 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2526 
2527 	/* OA */
2528 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2529 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2530 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
2531 }
2532 
2533 static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
2534 {
2535 	struct amdgpu_device *adev = ip_block->adev;
2536 
2537 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2538 					  AMDGPU_MAX_COMPUTE_RINGS);
2539 	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2540 	gfx_v9_4_3_set_ring_funcs(adev);
2541 	gfx_v9_4_3_set_irq_funcs(adev);
2542 	gfx_v9_4_3_set_gds_init(adev);
2543 	gfx_v9_4_3_set_rlc_funcs(adev);
2544 
2545 	/* init rlcg reg access ctrl */
2546 	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2547 
2548 	return gfx_v9_4_3_init_microcode(adev);
2549 }
2550 
2551 static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
2552 {
2553 	struct amdgpu_device *adev = ip_block->adev;
2554 	int r;
2555 
2556 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2557 	if (r)
2558 		return r;
2559 
2560 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2561 	if (r)
2562 		return r;
2563 
2564 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
2565 	if (r)
2566 		return r;
2567 
2568 	if (adev->gfx.ras &&
2569 	    adev->gfx.ras->enable_watchdog_timer)
2570 		adev->gfx.ras->enable_watchdog_timer(adev);
2571 
2572 	return 0;
2573 }
2574 
2575 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2576 					    bool enable, int xcc_id)
2577 {
2578 	uint32_t def, data;
2579 
2580 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2581 		return;
2582 
2583 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2584 				  regRLC_CGTT_MGCG_OVERRIDE);
2585 
2586 	if (enable)
2587 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2588 	else
2589 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2590 
2591 	if (def != data)
2592 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2593 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2594 
2595 }
2596 
2597 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2598 						bool enable, int xcc_id)
2599 {
2600 	uint32_t def, data;
2601 
2602 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2603 		return;
2604 
2605 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2606 				  regRLC_CGTT_MGCG_OVERRIDE);
2607 
2608 	if (enable)
2609 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2610 	else
2611 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2612 
2613 	if (def != data)
2614 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2615 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2616 }
2617 
2618 static void
2619 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2620 						bool enable, int xcc_id)
2621 {
2622 	uint32_t data, def;
2623 
2624 	/* It is disabled by HW by default */
2625 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2626 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2627 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2628 
2629 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2630 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2631 			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2632 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2633 
2634 		if (def != data)
2635 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2636 
2637 		/* MGLS is a global flag to control all MGLS in GFX */
2638 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2639 			/* 2 - RLC memory Light sleep */
2640 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2641 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2642 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2643 				if (def != data)
2644 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2645 			}
2646 			/* 3 - CP memory Light sleep */
2647 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2648 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2649 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2650 				if (def != data)
2651 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2652 			}
2653 		}
2654 	} else {
2655 		/* 1 - MGCG_OVERRIDE */
2656 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2657 
2658 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2659 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2660 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2661 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2662 
2663 		if (def != data)
2664 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2665 
2666 		/* 2 - disable MGLS in RLC */
2667 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2668 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2669 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2670 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2671 		}
2672 
2673 		/* 3 - disable MGLS in CP */
2674 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2675 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2676 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2677 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2678 		}
2679 	}
2680 
2681 }
2682 
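/* Coarse grain clock gating: when enabling, the CGCG/CGLS overrides are
 * lifted first and the FSM is armed with a 0x2710 (10000 RefCLK cycle)
 * idle threshold -- consistent with the 400us figure below assuming a
 * 25 MHz reference clock -- plus the CGLS compensation delay; when
 * disabling, only the CGCG_EN/CGLS_EN bits are cleared in the FSM.
 */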
2683 static void
2684 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2685 						bool enable, int xcc_id)
2686 {
2687 	uint32_t def, data;
2688 
2689 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2690 
2691 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2692 		/* unset CGCG override */
2693 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2694 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2695 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2696 		else
2697 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2698 		/* update CGCG and CGLS override bits */
2699 		if (def != data)
2700 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2701 
2702 		/* CGCG Hysteresis: 400us */
2703 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2704 
2705 		data = (0x2710
2706 			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2707 		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2708 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2709 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2710 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2711 		if (def != data)
2712 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2713 
2714 		/* set IDLE_POLL_COUNT(0x33450100)*/
2715 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2716 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2717 			(0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2718 		if (def != data)
2719 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2720 	} else {
2721 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2722 		/* reset CGCG/CGLS bits */
2723 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2724 		/* disable cgcg and cgls in FSM */
2725 		if (def != data)
2726 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2727 	}
2728 
2729 }
2730 
2731 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2732 						  bool enable, int xcc_id)
2733 {
2734 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2735 
2736 	if (enable) {
2737 		/* FGCG */
2738 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2739 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2740 
2741 		/* CGCG/CGLS should be enabled after MGCG/MGLS
2742 		 * ===  MGCG + MGLS ===
2743 		 */
2744 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2745 								xcc_id);
2746 		/* ===  CGCG + CGLS === */
2747 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2748 								xcc_id);
2749 	} else {
2750 		/* CGCG/CGLS should be disabled before MGCG/MGLS
2751 		 * ===  CGCG + CGLS ===
2752 		 */
2753 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2754 								xcc_id);
2755 		/* ===  MGCG + MGLS === */
2756 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2757 								xcc_id);
2758 
2759 		/* FGCG */
2760 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2761 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2762 	}
2763 
2764 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2765 
2766 	return 0;
2767 }
2768 
2769 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2770 	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2771 	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2772 	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2773 	.init = gfx_v9_4_3_rlc_init,
2774 	.resume = gfx_v9_4_3_rlc_resume,
2775 	.stop = gfx_v9_4_3_rlc_stop,
2776 	.reset = gfx_v9_4_3_rlc_reset,
2777 	.start = gfx_v9_4_3_rlc_start,
2778 	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2779 	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2780 };
2781 
2782 static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
2783 					  enum amd_powergating_state state)
2784 {
2785 	return 0;
2786 }
2787 
2788 static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2789 					  enum amd_clockgating_state state)
2790 {
2791 	struct amdgpu_device *adev = ip_block->adev;
2792 	int i, num_xcc;
2793 
2794 	if (amdgpu_sriov_vf(adev))
2795 		return 0;
2796 
2797 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2798 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2799 	case IP_VERSION(9, 4, 3):
2800 	case IP_VERSION(9, 4, 4):
2801 		for (i = 0; i < num_xcc; i++)
2802 			gfx_v9_4_3_xcc_update_gfx_clock_gating(
2803 				adev, state == AMD_CG_STATE_GATE, i);
2804 		break;
2805 	default:
2806 		break;
2807 	}
2808 	return 0;
2809 }
2810 
2811 static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
2812 {
2813 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2814 	int data;
2815 
2816 	if (amdgpu_sriov_vf(adev))
2817 		*flags = 0;
2818 
2819 	/* AMD_CG_SUPPORT_GFX_MGCG */
2820 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2821 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2822 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
2823 
2824 	/* AMD_CG_SUPPORT_GFX_CGCG */
2825 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2826 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2827 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
2828 
2829 	/* AMD_CG_SUPPORT_GFX_CGLS */
2830 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2831 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
2832 
2833 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
2834 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2835 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2836 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2837 
2838 	/* AMD_CG_SUPPORT_GFX_CP_LS */
2839 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2840 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2841 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2842 }
2843 
2844 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2845 {
2846 	struct amdgpu_device *adev = ring->adev;
2847 	u32 ref_and_mask, reg_mem_engine;
2848 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2849 
2850 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2851 		switch (ring->me) {
2852 		case 1:
2853 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2854 			break;
2855 		case 2:
2856 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2857 			break;
2858 		default:
2859 			return;
2860 		}
2861 		reg_mem_engine = 0;
2862 	} else {
2863 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2864 		reg_mem_engine = 1; /* pfp */
2865 	}
2866 
2867 	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2868 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2869 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2870 			      ref_and_mask, ref_and_mask, 0x20);
2871 }
2872 
2873 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2874 					  struct amdgpu_job *job,
2875 					  struct amdgpu_ib *ib,
2876 					  uint32_t flags)
2877 {
2878 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2879 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2880 
2881 	/* Currently, there is a high possibility to get wave ID mismatch
2882 	 * between ME and GDS, leading to a hw deadlock, because ME generates
2883 	 * different wave IDs than the GDS expects. This situation happens
2884 	 * randomly when at least 5 compute pipes use GDS ordered append.
2885 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2886 	 * Those are probably bugs somewhere else in the kernel driver.
2887 	 *
2888 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2889 	 * GDS to 0 for this ring (me/pipe).
2890 	 */
2891 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2892 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2893 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2894 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2895 	}
2896 
2897 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2898 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2899 	amdgpu_ring_write(ring,
2900 #ifdef __BIG_ENDIAN
2901 				(2 << 0) |
2902 #endif
2903 				lower_32_bits(ib->gpu_addr));
2904 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2905 	amdgpu_ring_write(ring, control);
2906 }
2907 
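/*
 * Emit an EOP fence via RELEASE_MEM: flush/write back the caches, write the
 * fence sequence number to the given address, and optionally raise an
 * interrupt, as selected by the fence flags.
 */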
2908 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2909 				     u64 seq, unsigned flags)
2910 {
2911 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2912 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2913 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2914 
2915 	/* RELEASE_MEM - flush caches, send int */
2916 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2917 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2918 					       EOP_TC_NC_ACTION_EN) :
2919 					      (EOP_TCL1_ACTION_EN |
2920 					       EOP_TC_ACTION_EN |
2921 					       EOP_TC_WB_ACTION_EN |
2922 					       EOP_TC_MD_ACTION_EN)) |
2923 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2924 				 EVENT_INDEX(5)));
2925 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2926 
2927 	/*
2928 	 * the address should be Qword aligned for a 64bit write, and Dword
2929 	 * aligned when only the low 32bit data is sent (data high is discarded)
2930 	 */
2931 	if (write64bit)
2932 		BUG_ON(addr & 0x7);
2933 	else
2934 		BUG_ON(addr & 0x3);
2935 	amdgpu_ring_write(ring, lower_32_bits(addr));
2936 	amdgpu_ring_write(ring, upper_32_bits(addr));
2937 	amdgpu_ring_write(ring, lower_32_bits(seq));
2938 	amdgpu_ring_write(ring, upper_32_bits(seq));
2939 	amdgpu_ring_write(ring, 0);
2940 }
2941 
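/*
 * Emit a pipeline sync: wait until the memory location holding the ring's
 * fence sequence reaches the latest synced value before continuing.
 */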
2942 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2943 {
2944 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2945 	uint32_t seq = ring->fence_drv.sync_seq;
2946 	uint64_t addr = ring->fence_drv.gpu_addr;
2947 
2948 	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2949 			      lower_32_bits(addr), upper_32_bits(addr),
2950 			      seq, 0xffffffff, 4);
2951 }
2952 
2953 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2954 					unsigned vmid, uint64_t pd_addr)
2955 {
2956 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2957 }
2958 
2959 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2960 {
2961 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2962 }
2963 
2964 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2965 {
2966 	u64 wptr;
2967 
2968 	/* XXX check if swapping is necessary on BE */
2969 	if (ring->use_doorbell)
2970 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2971 	else
2972 		BUG();
2973 	return wptr;
2974 }
2975 
2976 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2977 {
2978 	struct amdgpu_device *adev = ring->adev;
2979 
2980 	/* XXX check if swapping is necessary on BE */
2981 	if (ring->use_doorbell) {
2982 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2983 		WDOORBELL64(ring->doorbell_index, ring->wptr);
2984 	} else {
2985 		BUG(); /* only DOORBELL method supported on gfx9 now */
2986 	}
2987 }
2988 
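/*
 * KIQ variant of the fence emit: write the 32-bit sequence number with a
 * WRITE_DATA packet and, if requested, poke CPC_INT_STATUS to raise the
 * interrupt.
 */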
2989 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2990 					 u64 seq, unsigned int flags)
2991 {
2992 	struct amdgpu_device *adev = ring->adev;
2993 
2994 	/* we only allocate 32bit for each seq wb address */
2995 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2996 
2997 	/* write fence seq to the "addr" */
2998 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2999 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3000 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3001 	amdgpu_ring_write(ring, lower_32_bits(addr));
3002 	amdgpu_ring_write(ring, upper_32_bits(addr));
3003 	amdgpu_ring_write(ring, lower_32_bits(seq));
3004 
3005 	if (flags & AMDGPU_FENCE_FLAG_INT) {
3006 		/* set register to trigger INT */
3007 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3008 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3009 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3010 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
3011 		amdgpu_ring_write(ring, 0);
3012 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3013 	}
3014 }
3015 
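/*
 * Emit a register read: COPY_DATA from the (normalized) register offset
 * into writeback buffer slot reg_val_offs, with write confirmation.
 */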
3016 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
3017 				    uint32_t reg_val_offs)
3018 {
3019 	struct amdgpu_device *adev = ring->adev;
3020 
3021 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
3022 
3023 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3024 	amdgpu_ring_write(ring, 0 |	/* src: register*/
3025 				(5 << 8) |	/* dst: memory */
3026 				(1 << 20));	/* write confirm */
3027 	amdgpu_ring_write(ring, reg);
3028 	amdgpu_ring_write(ring, 0);
3029 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3030 				reg_val_offs * 4));
3031 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3032 				reg_val_offs * 4));
3033 }
3034 
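/*
 * Emit a register write via WRITE_DATA; the engine select and confirm bits
 * depend on the ring type (GFX/KIQ/compute).
 */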
3035 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
3036 				    uint32_t val)
3037 {
3038 	uint32_t cmd = 0;
3039 
3040 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
3041 
3042 	switch (ring->funcs->type) {
3043 	case AMDGPU_RING_TYPE_GFX:
3044 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
3045 		break;
3046 	case AMDGPU_RING_TYPE_KIQ:
3047 		cmd = (1 << 16); /* no inc addr */
3048 		break;
3049 	default:
3050 		cmd = WR_CONFIRM;
3051 		break;
3052 	}
3053 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3054 	amdgpu_ring_write(ring, cmd);
3055 	amdgpu_ring_write(ring, reg);
3056 	amdgpu_ring_write(ring, 0);
3057 	amdgpu_ring_write(ring, val);
3058 }
3059 
3060 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3061 					uint32_t val, uint32_t mask)
3062 {
3063 	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3064 }
3065 
3066 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3067 						  uint32_t reg0, uint32_t reg1,
3068 						  uint32_t ref, uint32_t mask)
3069 {
3070 	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
3071 						   ref, mask);
3072 }
3073 
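/*
 * Soft recovery: ask the SQ to kill the waves belonging to the hanging
 * VMID while the RLC is held in safe mode.
 */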
3074 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3075 					  unsigned vmid)
3076 {
3077 	struct amdgpu_device *adev = ring->adev;
3078 	uint32_t value = 0;
3079 
3080 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3081 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3082 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3083 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3084 	amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3085 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3086 	amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3087 }
3088 
3089 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3090 	struct amdgpu_device *adev, int me, int pipe,
3091 	enum amdgpu_interrupt_state state, int xcc_id)
3092 {
3093 	u32 mec_int_cntl, mec_int_cntl_reg;
3094 
3095 	/*
3096 	 * amdgpu controls only the first MEC. That's why this function only
3097 	 * handles the setting of interrupts for this specific MEC. All other
3098 	 * pipes' interrupts are set by amdkfd.
3099 	 */
3100 
3101 	if (me == 1) {
3102 		switch (pipe) {
3103 		case 0:
3104 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3105 			break;
3106 		case 1:
3107 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3108 			break;
3109 		case 2:
3110 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3111 			break;
3112 		case 3:
3113 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3114 			break;
3115 		default:
3116 			DRM_DEBUG("invalid pipe %d\n", pipe);
3117 			return;
3118 		}
3119 	} else {
3120 		DRM_DEBUG("invalid me %d\n", me);
3121 		return;
3122 	}
3123 
3124 	switch (state) {
3125 	case AMDGPU_IRQ_STATE_DISABLE:
3126 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3127 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3128 					     TIME_STAMP_INT_ENABLE, 0);
3129 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3130 		break;
3131 	case AMDGPU_IRQ_STATE_ENABLE:
3132 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3133 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3134 					     TIME_STAMP_INT_ENABLE, 1);
3135 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3136 		break;
3137 	default:
3138 		break;
3139 	}
3140 }
3141 
3142 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3143 				     int xcc_id, int me, int pipe)
3144 {
3145 	/*
3146 	 * amdgpu controls only the first MEC. That's why this function only
3147 	 * handles the setting of interrupts for this specific MEC. All other
3148 	 * pipes' interrupts are set by amdkfd.
3149 	 */
3150 	if (me != 1)
3151 		return 0;
3152 
3153 	switch (pipe) {
3154 	case 0:
3155 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3156 	case 1:
3157 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3158 	case 2:
3159 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3160 	case 3:
3161 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3162 	default:
3163 		return 0;
3164 	}
3165 }
3166 
3167 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3168 					     struct amdgpu_irq_src *source,
3169 					     unsigned type,
3170 					     enum amdgpu_interrupt_state state)
3171 {
3172 	u32 mec_int_cntl_reg, mec_int_cntl;
3173 	int i, j, k, num_xcc;
3174 
3175 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3176 	switch (state) {
3177 	case AMDGPU_IRQ_STATE_DISABLE:
3178 	case AMDGPU_IRQ_STATE_ENABLE:
3179 		for (i = 0; i < num_xcc; i++) {
3180 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3181 					      PRIV_REG_INT_ENABLE,
3182 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3183 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3184 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3185 					/* MECs start at 1 */
3186 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3187 
3188 					if (mec_int_cntl_reg) {
3189 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3190 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3191 									     PRIV_REG_INT_ENABLE,
3192 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3193 									     1 : 0);
3194 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3195 					}
3196 				}
3197 			}
3198 		}
3199 		break;
3200 	default:
3201 		break;
3202 	}
3203 
3204 	return 0;
3205 }
3206 
3207 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3208 					     struct amdgpu_irq_src *source,
3209 					     unsigned type,
3210 					     enum amdgpu_interrupt_state state)
3211 {
3212 	u32 mec_int_cntl_reg, mec_int_cntl;
3213 	int i, j, k, num_xcc;
3214 
3215 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3216 	switch (state) {
3217 	case AMDGPU_IRQ_STATE_DISABLE:
3218 	case AMDGPU_IRQ_STATE_ENABLE:
3219 		for (i = 0; i < num_xcc; i++) {
3220 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3221 					      OPCODE_ERROR_INT_ENABLE,
3222 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3223 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3224 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3225 					/* MECs start at 1 */
3226 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3227 
3228 					if (mec_int_cntl_reg) {
3229 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3230 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3231 									     OPCODE_ERROR_INT_ENABLE,
3232 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3233 									     1 : 0);
3234 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3235 					}
3236 				}
3237 			}
3238 		}
3239 		break;
3240 	default:
3241 		break;
3242 	}
3243 
3244 	return 0;
3245 }
3246 
3247 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3248 					      struct amdgpu_irq_src *source,
3249 					      unsigned type,
3250 					      enum amdgpu_interrupt_state state)
3251 {
3252 	int i, num_xcc;
3253 
3254 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3255 	switch (state) {
3256 	case AMDGPU_IRQ_STATE_DISABLE:
3257 	case AMDGPU_IRQ_STATE_ENABLE:
3258 		for (i = 0; i < num_xcc; i++)
3259 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3260 				PRIV_INSTR_INT_ENABLE,
3261 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3262 		break;
3263 	default:
3264 		break;
3265 	}
3266 
3267 	return 0;
3268 }
3269 
3270 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3271 					    struct amdgpu_irq_src *src,
3272 					    unsigned type,
3273 					    enum amdgpu_interrupt_state state)
3274 {
3275 	int i, num_xcc;
3276 
3277 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3278 	for (i = 0; i < num_xcc; i++) {
3279 		switch (type) {
3280 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3281 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3282 				adev, 1, 0, state, i);
3283 			break;
3284 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3285 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3286 				adev, 1, 1, state, i);
3287 			break;
3288 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3289 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3290 				adev, 1, 2, state, i);
3291 			break;
3292 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3293 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3294 				adev, 1, 3, state, i);
3295 			break;
3296 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3297 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3298 				adev, 2, 0, state, i);
3299 			break;
3300 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3301 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3302 				adev, 2, 1, state, i);
3303 			break;
3304 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3305 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3306 				adev, 2, 2, state, i);
3307 			break;
3308 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3309 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3310 				adev, 2, 3, state, i);
3311 			break;
3312 		default:
3313 			break;
3314 		}
3315 	}
3316 
3317 	return 0;
3318 }
3319 
3320 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
3321 			    struct amdgpu_irq_src *source,
3322 			    struct amdgpu_iv_entry *entry)
3323 {
3324 	int i, xcc_id;
3325 	u8 me_id, pipe_id, queue_id;
3326 	struct amdgpu_ring *ring;
3327 
3328 	DRM_DEBUG("IH: CP EOP\n");
3329 	me_id = (entry->ring_id & 0x0c) >> 2;
3330 	pipe_id = (entry->ring_id & 0x03) >> 0;
3331 	queue_id = (entry->ring_id & 0x70) >> 4;
3332 
3333 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3334 
3335 	if (xcc_id == -EINVAL)
3336 		return -EINVAL;
3337 
3338 	switch (me_id) {
3339 	case 0:
3340 	case 1:
3341 	case 2:
3342 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3343 			ring = &adev->gfx.compute_ring
3344 					[i +
3345 					 xcc_id * adev->gfx.num_compute_rings];
3346 			/* Per-queue interrupt is supported for MEC starting from VI.
3347 			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
3348 			 */
3349 
3350 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3351 				amdgpu_fence_process(ring);
3352 		}
3353 		break;
3354 	}
3355 	return 0;
3356 }
3357 
3358 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3359 			   struct amdgpu_iv_entry *entry)
3360 {
3361 	u8 me_id, pipe_id, queue_id;
3362 	struct amdgpu_ring *ring;
3363 	int i, xcc_id;
3364 
3365 	me_id = (entry->ring_id & 0x0c) >> 2;
3366 	pipe_id = (entry->ring_id & 0x03) >> 0;
3367 	queue_id = (entry->ring_id & 0x70) >> 4;
3368 
3369 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3370 
3371 	if (xcc_id == -EINVAL)
3372 		return;
3373 
3374 	switch (me_id) {
3375 	case 0:
3376 	case 1:
3377 	case 2:
3378 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3379 			ring = &adev->gfx.compute_ring
3380 					[i +
3381 					 xcc_id * adev->gfx.num_compute_rings];
3382 			if (ring->me == me_id && ring->pipe == pipe_id &&
3383 			    ring->queue == queue_id)
3384 				drm_sched_fault(&ring->sched);
3385 		}
3386 		break;
3387 	}
3388 }
3389 
3390 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3391 				 struct amdgpu_irq_src *source,
3392 				 struct amdgpu_iv_entry *entry)
3393 {
3394 	DRM_ERROR("Illegal register access in command stream\n");
3395 	gfx_v9_4_3_fault(adev, entry);
3396 	return 0;
3397 }
3398 
3399 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
3400 				 struct amdgpu_irq_src *source,
3401 				 struct amdgpu_iv_entry *entry)
3402 {
3403 	DRM_ERROR("Illegal opcode in command stream\n");
3404 	gfx_v9_4_3_fault(adev, entry);
3405 	return 0;
3406 }
3407 
3408 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3409 				  struct amdgpu_irq_src *source,
3410 				  struct amdgpu_iv_entry *entry)
3411 {
3412 	DRM_ERROR("Illegal instruction in command stream\n");
3413 	gfx_v9_4_3_fault(adev, entry);
3414 	return 0;
3415 }
3416 
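/*
 * Emit a full memory sync: invalidate/write back the shader and texture
 * caches over the whole address range via an ACQUIRE_MEM packet.
 */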
3417 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3418 {
3419 	const unsigned int cp_coher_cntl =
3420 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3421 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3422 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3423 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3424 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3425 
3426 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3427 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3428 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3429 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3430 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3431 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3432 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3433 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3434 }
3435 
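/*
 * Throttle (or restore) the compute-shader wave occupancy of one CS pipe
 * by writing its SPI_WCL_PIPE_PERCENT_CS register.
 */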
3436 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3437 					uint32_t pipe, bool enable)
3438 {
3439 	struct amdgpu_device *adev = ring->adev;
3440 	uint32_t val;
3441 	uint32_t wcl_cs_reg;
3442 
3443 	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
3444 	val = enable ? 0x1 : 0x7f;
3445 
3446 	switch (pipe) {
3447 	case 0:
3448 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3449 		break;
3450 	case 1:
3451 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3452 		break;
3453 	case 2:
3454 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3455 		break;
3456 	case 3:
3457 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3458 		break;
3459 	default:
3460 		DRM_DEBUG("invalid pipe %d\n", pipe);
3461 		return;
3462 	}
3463 
3464 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3465 
3466 }
3467 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3468 {
3469 	struct amdgpu_device *adev = ring->adev;
3470 	uint32_t val;
3471 	int i;
3472 
3473 	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7 bit multiplier register used to
3474 	 * limit the number of gfx waves. Writing 0x1f (the low 5 bits set)
3475 	 * makes sure gfx only gets around 25% of the gpu resources.
3476 	 */
3477 	val = enable ? 0x1f : 0x07ffffff;
3478 	amdgpu_ring_emit_wreg(ring,
3479 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3480 			      val);
3481 
3482 	/* Restrict waves for normal/low priority compute queues as well
3483 	 * to get best QoS for high priority compute jobs.
3484 	 *
3485 	 * amdgpu controls only 1st ME(0-3 CS pipes).
3486 	 */
3487 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3488 		if (i != ring->pipe)
3489 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3490 
3491 	}
3492 }
3493 
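/*
 * Wait (in safe mode, under the SRBM mutex) for the selected queue's HQD
 * to go inactive after an unmap request; returns -ETIMEDOUT on timeout.
 */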
3494 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
3495 				uint32_t pipe, uint32_t queue,
3496 				uint32_t xcc_id)
3497 {
3498 	int i, r;
3499 	/* make sure dequeue is complete */
3500 	gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
3501 	mutex_lock(&adev->srbm_mutex);
3502 	soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
3503 	for (i = 0; i < adev->usec_timeout; i++) {
3504 		if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
3505 			break;
3506 		udelay(1);
3507 	}
3508 	if (i >= adev->usec_timeout)
3509 		r = -ETIMEDOUT;
3510 	else
3511 		r = 0;
3512 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
3513 	mutex_unlock(&adev->srbm_mutex);
3514 	gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
3515 
3516 	return r;
3517 
3518 }
3519 
3520 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
3521 {
3522 	/* TODO: Check whether the gfx9.4.4 MEC firmware supports pipe reset as well. */
3523 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
3524 			adev->gfx.mec_fw_version >= 0x0000009b)
3525 		return true;
3526 	else
3527 		dev_warn_once(adev->dev, "Please update to the latest MEC firmware to check whether pipe reset is supported\n");
3528 
3529 	return false;
3530 }
3531 
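/*
 * Reset the MEC pipe backing this ring by toggling the per-pipe reset bit
 * in CP_MEC_CNTL, then wait for the HQD to deactivate.
 */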
3532 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
3533 {
3534 	struct amdgpu_device *adev = ring->adev;
3535 	uint32_t reset_pipe, clean_pipe;
3536 	int r;
3537 
3538 	if (!gfx_v9_4_3_pipe_reset_support(adev))
3539 		return -EINVAL;
3540 
3541 	gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
3542 	mutex_lock(&adev->srbm_mutex);
3543 
3544 	reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
3545 	clean_pipe = reset_pipe;
3546 
3547 	if (ring->me == 1) {
3548 		switch (ring->pipe) {
3549 		case 0:
3550 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3551 						   MEC_ME1_PIPE0_RESET, 1);
3552 			break;
3553 		case 1:
3554 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3555 						   MEC_ME1_PIPE1_RESET, 1);
3556 			break;
3557 		case 2:
3558 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3559 						   MEC_ME1_PIPE2_RESET, 1);
3560 			break;
3561 		case 3:
3562 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3563 						   MEC_ME1_PIPE3_RESET, 1);
3564 			break;
3565 		default:
3566 			break;
3567 		}
3568 	} else {
3569 		if (ring->pipe)
3570 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3571 						   MEC_ME2_PIPE1_RESET, 1);
3572 		else
3573 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3574 						   MEC_ME2_PIPE0_RESET, 1);
3575 	}
3576 
3577 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
3578 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
3579 	mutex_unlock(&adev->srbm_mutex);
3580 	gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
3581 
3582 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3583 	return r;
3584 }
3585 
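/*
 * Reset a compute (KCQ) queue: unmap it through the KIQ, fall back to a
 * pipe reset if the HQD will not deactivate, reinit the MQD, remap the
 * queue, then test the ring.
 */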
3586 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3587 				unsigned int vmid)
3588 {
3589 	struct amdgpu_device *adev = ring->adev;
3590 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3591 	struct amdgpu_ring *kiq_ring = &kiq->ring;
3592 	unsigned long flags;
3593 	int r;
3594 
3595 	if (amdgpu_sriov_vf(adev))
3596 		return -EINVAL;
3597 
3598 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3599 		return -EINVAL;
3600 
3601 	spin_lock_irqsave(&kiq->ring_lock, flags);
3602 
3603 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3604 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3605 		return -ENOMEM;
3606 	}
3607 
3608 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3609 				   0, 0);
3610 	amdgpu_ring_commit(kiq_ring);
3611 
3612 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3613 
3614 	r = amdgpu_ring_test_ring(kiq_ring);
3615 	if (r) {
3616 		dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
3617 				ring->name);
3618 		goto pipe_reset;
3619 	}
3620 
3621 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3622 	if (r)
3623 		dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
3624 
3625 pipe_reset:
3626 	if (r) {
3627 		r = gfx_v9_4_3_reset_hw_pipe(ring);
3628 		dev_info(adev->dev, "ring: %s pipe reset %s\n", ring->name,
3629 				r ? "failed" : "succeeded");
3630 		if (r)
3631 			return r;
3632 	}
3633 
3634 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
3635 	if (unlikely(r != 0)) {
3636 		dev_err(adev->dev, "failed to reserve mqd_obj\n");
3637 		return r;
3638 	}
3639 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3640 	if (!r) {
3641 		r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
3642 		amdgpu_bo_kunmap(ring->mqd_obj);
3643 		ring->mqd_ptr = NULL;
3644 	}
3645 	amdgpu_bo_unreserve(ring->mqd_obj);
3646 	if (r) {
3647 		dev_err(adev->dev, "fail to unresv mqd_obj\n");
3648 		return r;
3649 	}
3650 	spin_lock_irqsave(&kiq->ring_lock, flags);
3651 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
3652 	if (r) {
3653 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3654 		return -ENOMEM;
3655 	}
3656 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
3657 	amdgpu_ring_commit(kiq_ring);
3658 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3659 
3660 	r = amdgpu_ring_test_ring(kiq_ring);
3661 	if (r) {
3662 		dev_err(adev->dev, "fail to remap queue\n");
3663 		return r;
3664 	}
3665 	return amdgpu_ring_test_ring(ring);
3666 }
3667 
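/*
 * RAS memory instance IDs, per GC block, and the lookup tables below that
 * map them to human readable names for error reporting.
 */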
3668 enum amdgpu_gfx_cp_ras_mem_id {
3669 	AMDGPU_GFX_CP_MEM1 = 1,
3670 	AMDGPU_GFX_CP_MEM2,
3671 	AMDGPU_GFX_CP_MEM3,
3672 	AMDGPU_GFX_CP_MEM4,
3673 	AMDGPU_GFX_CP_MEM5,
3674 };
3675 
3676 enum amdgpu_gfx_gcea_ras_mem_id {
3677 	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3678 	AMDGPU_GFX_GCEA_IORD_CMDMEM,
3679 	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3680 	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3681 	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3682 	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3683 	AMDGPU_GFX_GCEA_MAM_DMEM0,
3684 	AMDGPU_GFX_GCEA_MAM_DMEM1,
3685 	AMDGPU_GFX_GCEA_MAM_DMEM2,
3686 	AMDGPU_GFX_GCEA_MAM_DMEM3,
3687 	AMDGPU_GFX_GCEA_MAM_AMEM0,
3688 	AMDGPU_GFX_GCEA_MAM_AMEM1,
3689 	AMDGPU_GFX_GCEA_MAM_AMEM2,
3690 	AMDGPU_GFX_GCEA_MAM_AMEM3,
3691 	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3692 	AMDGPU_GFX_GCEA_WRET_TAGMEM,
3693 	AMDGPU_GFX_GCEA_RRET_TAGMEM,
3694 	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3695 	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3696 	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3697 };
3698 
3699 enum amdgpu_gfx_gc_cane_ras_mem_id {
3700 	AMDGPU_GFX_GC_CANE_MEM0 = 0,
3701 };
3702 
3703 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3704 	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3705 };
3706 
3707 enum amdgpu_gfx_gds_ras_mem_id {
3708 	AMDGPU_GFX_GDS_MEM0 = 0,
3709 };
3710 
3711 enum amdgpu_gfx_lds_ras_mem_id {
3712 	AMDGPU_GFX_LDS_BANK0 = 0,
3713 	AMDGPU_GFX_LDS_BANK1,
3714 	AMDGPU_GFX_LDS_BANK2,
3715 	AMDGPU_GFX_LDS_BANK3,
3716 	AMDGPU_GFX_LDS_BANK4,
3717 	AMDGPU_GFX_LDS_BANK5,
3718 	AMDGPU_GFX_LDS_BANK6,
3719 	AMDGPU_GFX_LDS_BANK7,
3720 	AMDGPU_GFX_LDS_BANK8,
3721 	AMDGPU_GFX_LDS_BANK9,
3722 	AMDGPU_GFX_LDS_BANK10,
3723 	AMDGPU_GFX_LDS_BANK11,
3724 	AMDGPU_GFX_LDS_BANK12,
3725 	AMDGPU_GFX_LDS_BANK13,
3726 	AMDGPU_GFX_LDS_BANK14,
3727 	AMDGPU_GFX_LDS_BANK15,
3728 	AMDGPU_GFX_LDS_BANK16,
3729 	AMDGPU_GFX_LDS_BANK17,
3730 	AMDGPU_GFX_LDS_BANK18,
3731 	AMDGPU_GFX_LDS_BANK19,
3732 	AMDGPU_GFX_LDS_BANK20,
3733 	AMDGPU_GFX_LDS_BANK21,
3734 	AMDGPU_GFX_LDS_BANK22,
3735 	AMDGPU_GFX_LDS_BANK23,
3736 	AMDGPU_GFX_LDS_BANK24,
3737 	AMDGPU_GFX_LDS_BANK25,
3738 	AMDGPU_GFX_LDS_BANK26,
3739 	AMDGPU_GFX_LDS_BANK27,
3740 	AMDGPU_GFX_LDS_BANK28,
3741 	AMDGPU_GFX_LDS_BANK29,
3742 	AMDGPU_GFX_LDS_BANK30,
3743 	AMDGPU_GFX_LDS_BANK31,
3744 	AMDGPU_GFX_LDS_SP_BUFFER_A,
3745 	AMDGPU_GFX_LDS_SP_BUFFER_B,
3746 };
3747 
3748 enum amdgpu_gfx_rlc_ras_mem_id {
3749 	AMDGPU_GFX_RLC_GPMF32 = 1,
3750 	AMDGPU_GFX_RLC_RLCVF32,
3751 	AMDGPU_GFX_RLC_SCRATCH,
3752 	AMDGPU_GFX_RLC_SRM_ARAM,
3753 	AMDGPU_GFX_RLC_SRM_DRAM,
3754 	AMDGPU_GFX_RLC_TCTAG,
3755 	AMDGPU_GFX_RLC_SPM_SE,
3756 	AMDGPU_GFX_RLC_SPM_GRBMT,
3757 };
3758 
3759 enum amdgpu_gfx_sp_ras_mem_id {
3760 	AMDGPU_GFX_SP_SIMDID0 = 0,
3761 };
3762 
3763 enum amdgpu_gfx_spi_ras_mem_id {
3764 	AMDGPU_GFX_SPI_MEM0 = 0,
3765 	AMDGPU_GFX_SPI_MEM1,
3766 	AMDGPU_GFX_SPI_MEM2,
3767 	AMDGPU_GFX_SPI_MEM3,
3768 };
3769 
3770 enum amdgpu_gfx_sqc_ras_mem_id {
3771 	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3772 	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3773 	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3774 	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3775 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3776 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3777 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3778 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3779 	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3780 	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3781 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3782 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3783 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3784 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3785 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3786 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3787 	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3788 	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3789 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3790 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3791 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3792 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3793 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3794 };
3795 
3796 enum amdgpu_gfx_sq_ras_mem_id {
3797 	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3798 	AMDGPU_GFX_SQ_SGPR_MEM1,
3799 	AMDGPU_GFX_SQ_SGPR_MEM2,
3800 	AMDGPU_GFX_SQ_SGPR_MEM3,
3801 };
3802 
3803 enum amdgpu_gfx_ta_ras_mem_id {
3804 	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3805 	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3806 	AMDGPU_GFX_TA_FS_CFIFO_RAM,
3807 	AMDGPU_GFX_TA_FSX_LFIFO,
3808 	AMDGPU_GFX_TA_FS_DFIFO_RAM,
3809 };
3810 
3811 enum amdgpu_gfx_tcc_ras_mem_id {
3812 	AMDGPU_GFX_TCC_MEM1 = 1,
3813 };
3814 
3815 enum amdgpu_gfx_tca_ras_mem_id {
3816 	AMDGPU_GFX_TCA_MEM1 = 1,
3817 };
3818 
3819 enum amdgpu_gfx_tci_ras_mem_id {
3820 	AMDGPU_GFX_TCIW_MEM = 1,
3821 };
3822 
3823 enum amdgpu_gfx_tcp_ras_mem_id {
3824 	AMDGPU_GFX_TCP_LFIFO0 = 1,
3825 	AMDGPU_GFX_TCP_SET0BANK0_RAM,
3826 	AMDGPU_GFX_TCP_SET0BANK1_RAM,
3827 	AMDGPU_GFX_TCP_SET0BANK2_RAM,
3828 	AMDGPU_GFX_TCP_SET0BANK3_RAM,
3829 	AMDGPU_GFX_TCP_SET1BANK0_RAM,
3830 	AMDGPU_GFX_TCP_SET1BANK1_RAM,
3831 	AMDGPU_GFX_TCP_SET1BANK2_RAM,
3832 	AMDGPU_GFX_TCP_SET1BANK3_RAM,
3833 	AMDGPU_GFX_TCP_SET2BANK0_RAM,
3834 	AMDGPU_GFX_TCP_SET2BANK1_RAM,
3835 	AMDGPU_GFX_TCP_SET2BANK2_RAM,
3836 	AMDGPU_GFX_TCP_SET2BANK3_RAM,
3837 	AMDGPU_GFX_TCP_SET3BANK0_RAM,
3838 	AMDGPU_GFX_TCP_SET3BANK1_RAM,
3839 	AMDGPU_GFX_TCP_SET3BANK2_RAM,
3840 	AMDGPU_GFX_TCP_SET3BANK3_RAM,
3841 	AMDGPU_GFX_TCP_VM_FIFO,
3842 	AMDGPU_GFX_TCP_DB_TAGRAM0,
3843 	AMDGPU_GFX_TCP_DB_TAGRAM1,
3844 	AMDGPU_GFX_TCP_DB_TAGRAM2,
3845 	AMDGPU_GFX_TCP_DB_TAGRAM3,
3846 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3847 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3848 	AMDGPU_GFX_TCP_CMD_FIFO,
3849 };
3850 
3851 enum amdgpu_gfx_td_ras_mem_id {
3852 	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3853 	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3854 	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3855 };
3856 
3857 enum amdgpu_gfx_tcx_ras_mem_id {
3858 	AMDGPU_GFX_TCX_FIFOD0 = 0,
3859 	AMDGPU_GFX_TCX_FIFOD1,
3860 	AMDGPU_GFX_TCX_FIFOD2,
3861 	AMDGPU_GFX_TCX_FIFOD3,
3862 	AMDGPU_GFX_TCX_FIFOD4,
3863 	AMDGPU_GFX_TCX_FIFOD5,
3864 	AMDGPU_GFX_TCX_FIFOD6,
3865 	AMDGPU_GFX_TCX_FIFOD7,
3866 	AMDGPU_GFX_TCX_FIFOB0,
3867 	AMDGPU_GFX_TCX_FIFOB1,
3868 	AMDGPU_GFX_TCX_FIFOB2,
3869 	AMDGPU_GFX_TCX_FIFOB3,
3870 	AMDGPU_GFX_TCX_FIFOB4,
3871 	AMDGPU_GFX_TCX_FIFOB5,
3872 	AMDGPU_GFX_TCX_FIFOB6,
3873 	AMDGPU_GFX_TCX_FIFOB7,
3874 	AMDGPU_GFX_TCX_FIFOA0,
3875 	AMDGPU_GFX_TCX_FIFOA1,
3876 	AMDGPU_GFX_TCX_FIFOA2,
3877 	AMDGPU_GFX_TCX_FIFOA3,
3878 	AMDGPU_GFX_TCX_FIFOA4,
3879 	AMDGPU_GFX_TCX_FIFOA5,
3880 	AMDGPU_GFX_TCX_FIFOA6,
3881 	AMDGPU_GFX_TCX_FIFOA7,
3882 	AMDGPU_GFX_TCX_CFIFO0,
3883 	AMDGPU_GFX_TCX_CFIFO1,
3884 	AMDGPU_GFX_TCX_CFIFO2,
3885 	AMDGPU_GFX_TCX_CFIFO3,
3886 	AMDGPU_GFX_TCX_CFIFO4,
3887 	AMDGPU_GFX_TCX_CFIFO5,
3888 	AMDGPU_GFX_TCX_CFIFO6,
3889 	AMDGPU_GFX_TCX_CFIFO7,
3890 	AMDGPU_GFX_TCX_FIFO_ACKB0,
3891 	AMDGPU_GFX_TCX_FIFO_ACKB1,
3892 	AMDGPU_GFX_TCX_FIFO_ACKB2,
3893 	AMDGPU_GFX_TCX_FIFO_ACKB3,
3894 	AMDGPU_GFX_TCX_FIFO_ACKB4,
3895 	AMDGPU_GFX_TCX_FIFO_ACKB5,
3896 	AMDGPU_GFX_TCX_FIFO_ACKB6,
3897 	AMDGPU_GFX_TCX_FIFO_ACKB7,
3898 	AMDGPU_GFX_TCX_FIFO_ACKD0,
3899 	AMDGPU_GFX_TCX_FIFO_ACKD1,
3900 	AMDGPU_GFX_TCX_FIFO_ACKD2,
3901 	AMDGPU_GFX_TCX_FIFO_ACKD3,
3902 	AMDGPU_GFX_TCX_FIFO_ACKD4,
3903 	AMDGPU_GFX_TCX_FIFO_ACKD5,
3904 	AMDGPU_GFX_TCX_FIFO_ACKD6,
3905 	AMDGPU_GFX_TCX_FIFO_ACKD7,
3906 	AMDGPU_GFX_TCX_DST_FIFOA0,
3907 	AMDGPU_GFX_TCX_DST_FIFOA1,
3908 	AMDGPU_GFX_TCX_DST_FIFOA2,
3909 	AMDGPU_GFX_TCX_DST_FIFOA3,
3910 	AMDGPU_GFX_TCX_DST_FIFOA4,
3911 	AMDGPU_GFX_TCX_DST_FIFOA5,
3912 	AMDGPU_GFX_TCX_DST_FIFOA6,
3913 	AMDGPU_GFX_TCX_DST_FIFOA7,
3914 	AMDGPU_GFX_TCX_DST_FIFOB0,
3915 	AMDGPU_GFX_TCX_DST_FIFOB1,
3916 	AMDGPU_GFX_TCX_DST_FIFOB2,
3917 	AMDGPU_GFX_TCX_DST_FIFOB3,
3918 	AMDGPU_GFX_TCX_DST_FIFOB4,
3919 	AMDGPU_GFX_TCX_DST_FIFOB5,
3920 	AMDGPU_GFX_TCX_DST_FIFOB6,
3921 	AMDGPU_GFX_TCX_DST_FIFOB7,
3922 	AMDGPU_GFX_TCX_DST_FIFOD0,
3923 	AMDGPU_GFX_TCX_DST_FIFOD1,
3924 	AMDGPU_GFX_TCX_DST_FIFOD2,
3925 	AMDGPU_GFX_TCX_DST_FIFOD3,
3926 	AMDGPU_GFX_TCX_DST_FIFOD4,
3927 	AMDGPU_GFX_TCX_DST_FIFOD5,
3928 	AMDGPU_GFX_TCX_DST_FIFOD6,
3929 	AMDGPU_GFX_TCX_DST_FIFOD7,
3930 	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3931 	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3932 	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3933 	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3934 	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3935 	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3936 	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3937 	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3938 	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3939 	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3940 	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3941 	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3942 	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3943 	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3944 	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3945 	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3946 };
3947 
3948 enum amdgpu_gfx_atc_l2_ras_mem_id {
3949 	AMDGPU_GFX_ATC_L2_MEM0 = 0,
3950 };
3951 
3952 enum amdgpu_gfx_utcl2_ras_mem_id {
3953 	AMDGPU_GFX_UTCL2_MEM0 = 0,
3954 };
3955 
3956 enum amdgpu_gfx_vml2_ras_mem_id {
3957 	AMDGPU_GFX_VML2_MEM0 = 0,
3958 };
3959 
3960 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3961 	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3962 };
3963 
3964 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3965 	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3966 	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3967 	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3968 	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3969 	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3970 };
3971 
3972 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3973 	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3974 	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3975 	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3976 	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3977 	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3978 	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3979 	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3980 	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3981 	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3982 	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3983 	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3984 	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3985 	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3986 	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3987 	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3988 	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3989 	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3990 	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3991 	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3992 	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3993 };
3994 
3995 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3996 	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3997 };
3998 
3999 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
4000 	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
4001 };
4002 
4003 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
4004 	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
4005 };
4006 
4007 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
4008 	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
4009 	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
4010 	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
4011 	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
4012 	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
4013 	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
4014 	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
4015 	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
4016 	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
4017 	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
4018 	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
4019 	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
4020 	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
4021 	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
4022 	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
4023 	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
4024 	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
4025 	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
4026 	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
4027 	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
4028 	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
4029 	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
4030 	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
4031 	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
4032 	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
4033 	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
4034 	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
4035 	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
4036 	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
4037 	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
4038 	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
4039 	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
4040 	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
4041 	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
4042 };
4043 
4044 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
4045 	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
4046 	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
4047 	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
4048 	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
4049 	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
4050 	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
4051 	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
4052 	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
4053 };
4054 
4055 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
4056 	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
4057 };
4058 
4059 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
4060 	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
4061 	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
4062 	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
4063 	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
4064 };
4065 
4066 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
4067 	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
4068 	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
4069 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
4070 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
4071 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
4072 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
4073 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
4074 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
4075 	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
4076 	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
4077 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
4078 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
4079 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
4080 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
4081 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
4082 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
4083 	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
4084 	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
4085 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
4086 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
4087 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
4088 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
4089 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
4090 };
4091 
4092 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
4093 	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
4094 	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
4095 	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
4096 	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
4097 };
4098 
4099 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
4100 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
4101 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
4102 	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
4103 	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
4104 	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
4105 };
4106 
4107 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
4108 	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
4109 };
4110 
4111 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
4112 	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
4113 };
4114 
4115 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
4116 	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
4117 };
4118 
4119 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
4120 	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
4121 	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
4122 	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
4123 	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
4124 	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
4125 	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
4126 	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
4127 	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
4128 	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
4129 	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
4130 	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
4131 	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
4132 	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
4133 	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
4134 	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
4135 	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
4136 	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
4137 	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
4138 	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
4139 	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
4140 	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
4141 	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
4142 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
4143 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
4144 	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
4145 };
4146 
4147 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
4148 	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
4149 	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
4150 	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
4151 };
4152 
4153 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
4154 	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
4155 	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
4156 	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
4157 	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
4158 	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
4159 	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
4160 	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
4161 	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
4162 	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
4163 	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
4164 	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
4165 	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
4166 	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
4167 	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
4168 	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
4169 	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
4170 	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
4171 	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
4172 	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
4173 	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
4174 	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
4175 	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
4176 	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
4177 	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
4178 	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
4179 	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
4180 	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
4181 	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
4182 	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
4183 	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
4184 	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
4185 	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
4186 	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
4187 	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
4188 	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
4189 	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
4190 	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
4191 	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
4192 	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
4193 	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
4194 	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
4195 	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
4196 	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
4197 	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
4198 	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
4199 	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
4200 	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
4201 	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
4202 	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
4203 	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4204 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4205 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4206 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4207 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4208 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4209 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4210 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4211 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4212 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4213 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4214 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4215 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4216 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4217 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4218 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4219 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4220 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4221 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4222 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4223 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4224 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4225 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4226 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4227 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4228 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4229 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4230 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4231 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4232 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4233 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4234 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4235 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4236 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4237 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4238 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4239 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4240 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4241 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4242 };
4243 
4244 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4245 	{AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
4246 };
4247 
4248 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4249 	{AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
4250 };
4251 
4252 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4253 	{AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
4254 };
4255 
4256 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4257 	{AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
4258 };
4259 
4260 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4261 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4262 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4263 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4264 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4265 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4266 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4267 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4268 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4269 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4270 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4271 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4272 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4273 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
4274 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
4275 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
4276 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
4277 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
4278 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
4279 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
4280 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
4281 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
4282 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
4283 };
4284 
4285 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
4286 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
4287 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4288 	    AMDGPU_GFX_RLC_MEM, 1},
4289 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
4290 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4291 	    AMDGPU_GFX_CP_MEM, 1},
4292 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
4293 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4294 	    AMDGPU_GFX_CP_MEM, 1},
4295 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
4296 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4297 	    AMDGPU_GFX_CP_MEM, 1},
4298 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
4299 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4300 	    AMDGPU_GFX_GDS_MEM, 1},
4301 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
4302 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4303 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4304 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
4305 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4306 	    AMDGPU_GFX_SPI_MEM, 1},
4307 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
4308 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4309 	    AMDGPU_GFX_SP_MEM, 4},
4310 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
4311 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4312 	    AMDGPU_GFX_SP_MEM, 4},
4313 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
4314 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4315 	    AMDGPU_GFX_SQ_MEM, 4},
4316 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
4317 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4318 	    AMDGPU_GFX_SQC_MEM, 4},
4319 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
4320 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4321 	    AMDGPU_GFX_TCX_MEM, 1},
4322 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
4323 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4324 	    AMDGPU_GFX_TCC_MEM, 1},
4325 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
4326 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4327 	    AMDGPU_GFX_TA_MEM, 4},
4328 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
4329 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4330 	    AMDGPU_GFX_TCI_MEM, 1},
4331 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
4332 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4333 	    AMDGPU_GFX_TCP_MEM, 4},
4334 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
4335 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4336 	    AMDGPU_GFX_TD_MEM, 4},
4337 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
4338 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4339 	    AMDGPU_GFX_GCEA_MEM, 1},
4340 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
4341 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4342 	    AMDGPU_GFX_LDS_MEM, 4},
4343 };
4344 
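/*
 * Uncorrectable-error (UE) status register list. The leading entries
 * mirror gfx_v9_4_3_ce_reg_list index for index, so the query/reset
 * loops below can walk both lists with one iterator; UE-only banks
 * (e.g. TCA) are appended at the tail and picked up by the loops that
 * handle the remaining UE-only entries.
 */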
4345 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
4346 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
4347 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4348 	    AMDGPU_GFX_RLC_MEM, 1},
4349 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
4350 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4351 	    AMDGPU_GFX_CP_MEM, 1},
4352 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
4353 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4354 	    AMDGPU_GFX_CP_MEM, 1},
4355 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
4356 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4357 	    AMDGPU_GFX_CP_MEM, 1},
4358 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
4359 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4360 	    AMDGPU_GFX_GDS_MEM, 1},
4361 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
4362 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4363 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4364 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
4365 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4366 	    AMDGPU_GFX_SPI_MEM, 1},
4367 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
4368 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4369 	    AMDGPU_GFX_SP_MEM, 4},
4370 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
4371 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4372 	    AMDGPU_GFX_SP_MEM, 4},
4373 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
4374 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4375 	    AMDGPU_GFX_SQ_MEM, 4},
4376 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
4377 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4378 	    AMDGPU_GFX_SQC_MEM, 4},
4379 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
4380 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4381 	    AMDGPU_GFX_TCX_MEM, 1},
4382 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
4383 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4384 	    AMDGPU_GFX_TCC_MEM, 1},
4385 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
4386 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4387 	    AMDGPU_GFX_TA_MEM, 4},
4388 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
4389 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4390 	    AMDGPU_GFX_TCI_MEM, 1},
4391 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
4392 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4393 	    AMDGPU_GFX_TCP_MEM, 4},
4394 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
4395 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4396 	    AMDGPU_GFX_TD_MEM, 4},
4397 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
4398 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
4399 	    AMDGPU_GFX_TCA_MEM, 1},
4400 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
4401 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4402 	    AMDGPU_GFX_GCEA_MEM, 1},
4403 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
4404 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4405 	    AMDGPU_GFX_LDS_MEM, 4},
4406 };
4407 
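/*
 * Query correctable and uncorrectable error counts for one XCC
 * instance: walk the CE and UE register lists, selecting each
 * SE/register instance through GRBM where a bank has multiple copies,
 * and account the totals against the socket/die that owns this XCC.
 */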
4408 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
4409 					void *ras_error_status, int xcc_id)
4410 {
4411 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
4412 	unsigned long ce_count = 0, ue_count = 0;
4413 	uint32_t i, j, k;
4414 
4415 	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
4416 	struct amdgpu_smuio_mcm_config_info mcm_info = {
4417 		.socket_id = adev->smuio.funcs->get_socket_id(adev),
4418 		.die_id = xcc_id & 0x01 ? 1 : 0,
4419 	};
4420 
4421 	mutex_lock(&adev->grbm_idx_mutex);
4422 
4423 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4424 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4425 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4426 				/* no need to select if instance number is 1 */
4427 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4428 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4429 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4430 
4431 				amdgpu_ras_inst_query_ras_error_count(adev,
4432 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4433 					1,
4434 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
4435 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
4436 					GET_INST(GC, xcc_id),
4437 					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
4438 					&ce_count);
4439 
4440 				amdgpu_ras_inst_query_ras_error_count(adev,
4441 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4442 					1,
4443 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4444 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4445 					GET_INST(GC, xcc_id),
4446 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4447 					&ue_count);
4448 			}
4449 		}
4450 	}
4451 
4452 	/* handle the remaining UE-only register entries */
4453 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4454 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4455 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4456 				/* no need to select if instance number is 1 */
4457 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4458 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4459 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4460 
4461 				amdgpu_ras_inst_query_ras_error_count(adev,
4462 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4463 					1,
4464 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4465 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4466 					GET_INST(GC, xcc_id),
4467 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4468 					&ue_count);
4469 			}
4470 		}
4471 	}
4472 
4473 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4474 			xcc_id);
4475 	mutex_unlock(&adev->grbm_idx_mutex);
4476 
4477 	/* the caller is expected to have initialized
4478 	 * err_data->ue_count and err_data->ce_count
4479 	 */
4480 	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4481 	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4482 }
4483 
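/*
 * Clear the CE and UE error status registers of one XCC instance,
 * using the same SE/register-instance walk as the query path so that
 * every copy of every bank is reset.
 */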
4484 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4485 					void *ras_error_status, int xcc_id)
4486 {
4487 	uint32_t i, j, k;
4488 
4489 	mutex_lock(&adev->grbm_idx_mutex);
4490 
4491 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4492 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4493 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4494 				/* no need to select if instance number is 1 */
4495 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4496 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4497 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4498 
4499 				amdgpu_ras_inst_reset_ras_error_count(adev,
4500 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4501 					1,
4502 					GET_INST(GC, xcc_id));
4503 
4504 				amdgpu_ras_inst_reset_ras_error_count(adev,
4505 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4506 					1,
4507 					GET_INST(GC, xcc_id));
4508 			}
4509 		}
4510 	}
4511 
4512 	/* handle the remaining UE-only register entries */
4513 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4514 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4515 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4516 				/* no need to select if instance number is 1 */
4517 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4518 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4519 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4520 
4521 				amdgpu_ras_inst_reset_ras_error_count(adev,
4522 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4523 					1,
4524 					GET_INST(GC, xcc_id));
4525 			}
4526 		}
4527 	}
4528 
4529 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4530 			xcc_id);
4531 	mutex_unlock(&adev->grbm_idx_mutex);
4532 }
4533 
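/*
 * Program the SQ watchdog of one XCC instance from the
 * amdgpu_watchdog_timer module parameters, clamping the period to the
 * 1..0x23 range the PERIOD_SEL field accepts. Skipped under SR-IOV.
 */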
4534 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
4535 					void *ras_error_status, int xcc_id)
4536 {
4537 	uint32_t i;
4538 	uint32_t data;
4539 
4540 	if (amdgpu_sriov_vf(adev))
4541 		return;
4542 
4543 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
4544 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
4545 			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
4546 
4547 	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
4548 	    (amdgpu_watchdog_timer.period < 1 ||
4549 	     amdgpu_watchdog_timer.period > 0x23)) {
4550 		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
4551 		amdgpu_watchdog_timer.period = 0x23;
4552 	}
4553 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
4554 			     amdgpu_watchdog_timer.period);
4555 
4556 	mutex_lock(&adev->grbm_idx_mutex);
4557 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4558 		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
4559 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
4560 	}
4561 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4562 			xcc_id);
4563 	mutex_unlock(&adev->grbm_idx_mutex);
4564 }
4565 
4566 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4567 					void *ras_error_status)
4568 {
4569 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4570 			gfx_v9_4_3_inst_query_ras_err_count);
4571 }
4572 
4573 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4574 {
4575 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4576 }
4577 
4578 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
4579 {
4580 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
4581 }
4582 
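/*
 * Pad the ring with @num_nop NOP dwords. Rather than emitting
 * individual NOP packets, write a single PACKET3_NOP header whose
 * count (capped at 0x3ffe) lets the CP skip the filler dwords that
 * follow in one fetch.
 */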
4583 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
4584 {
4585 	/* Header itself is a NOP packet */
4586 	if (num_nop == 1) {
4587 		amdgpu_ring_write(ring, ring->funcs->nop);
4588 		return;
4589 	}
4590 
4591 	/* Max HW optimization up to a NOP count of 0x3ffe; remaining NOPs are emitted one at a time */
4592 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
4593 
4594 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
4595 	amdgpu_ring_insert_nop(ring, num_nop - 1);
4596 }
4597 
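/*
 * Pretty-print the register state captured by gfx_v9_4_3_ip_dump():
 * first the core GC registers of each XCC, then the CP registers of
 * every mec/pipe/queue combination.
 */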
4598 static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
4599 {
4600 	struct amdgpu_device *adev = ip_block->adev;
4601 	uint32_t i, j, k;
4602 	uint32_t xcc_id, xcc_offset, inst_offset;
4603 	uint32_t num_xcc, reg, num_inst;
4604 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4605 
4606 	if (!adev->gfx.ip_dump_core)
4607 		return;
4608 
4609 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4610 	drm_printf(p, "Number of Instances:%d\n", num_xcc);
4611 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4612 		xcc_offset = xcc_id * reg_count;
4613 		drm_printf(p, "\nInstance id:%d\n", xcc_id);
4614 		for (i = 0; i < reg_count; i++)
4615 			drm_printf(p, "%-50s \t 0x%08x\n",
4616 				   gc_reg_list_9_4_3[i].reg_name,
4617 				   adev->gfx.ip_dump_core[xcc_offset + i]);
4618 	}
4619 
4620 	/* print compute queue registers for all instances */
4621 	if (!adev->gfx.ip_dump_compute_queues)
4622 		return;
4623 
4624 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4625 		adev->gfx.mec.num_queue_per_pipe;
4626 
4627 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4628 	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
4629 		   num_xcc,
4630 		   adev->gfx.mec.num_mec,
4631 		   adev->gfx.mec.num_pipe_per_mec,
4632 		   adev->gfx.mec.num_queue_per_pipe);
4633 
4634 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4635 		xcc_offset = xcc_id * reg_count * num_inst;
4636 		inst_offset = 0;
4637 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4638 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4639 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4640 					drm_printf(p,
4641 						   "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
4642 						    xcc_id, i, j, k);
4643 					for (reg = 0; reg < reg_count; reg++) {
4644 						drm_printf(p,
4645 							   "%-50s \t 0x%08x\n",
4646 							   gc_cp_reg_list_9_4_3[reg].reg_name,
4647 							   adev->gfx.ip_dump_compute_queues
4648 								[xcc_offset + inst_offset +
4649 								reg]);
4650 					}
4651 					inst_offset += reg_count;
4652 				}
4653 			}
4654 		}
4655 	}
4656 }
4657 
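/*
 * Snapshot the core GC registers of each XCC and, via SRBM selection,
 * the CP registers of every compute queue into the preallocated
 * ip_dump buffers for later printing by gfx_v9_4_3_ip_print().
 */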
4658 static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
4659 {
4660 	struct amdgpu_device *adev = ip_block->adev;
4661 	uint32_t i, j, k;
4662 	uint32_t num_xcc, reg, num_inst;
4663 	uint32_t xcc_id, xcc_offset, inst_offset;
4664 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4665 
4666 	if (!adev->gfx.ip_dump_core)
4667 		return;
4668 
4669 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4670 
4671 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4672 		xcc_offset = xcc_id * reg_count;
4673 		for (i = 0; i < reg_count; i++)
4674 			adev->gfx.ip_dump_core[xcc_offset + i] =
4675 				RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
4676 								   GET_INST(GC, xcc_id)));
4677 	}
4678 
4679 	/* dump compute queue registers for all instances */
4680 	if (!adev->gfx.ip_dump_compute_queues)
4681 		return;
4682 
4683 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4684 		adev->gfx.mec.num_queue_per_pipe;
4685 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4686 	mutex_lock(&adev->srbm_mutex);
4687 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4688 		xcc_offset = xcc_id * reg_count * num_inst;
4689 		inst_offset = 0;
4690 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4691 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4692 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4693 					/* ME0 is for GFX so start from 1 for CP */
4694 					soc15_grbm_select(adev, 1 + i, j, k, 0,
4695 							  GET_INST(GC, xcc_id));
4696 
4697 					for (reg = 0; reg < reg_count; reg++) {
4698 						adev->gfx.ip_dump_compute_queues
4699 							[xcc_offset +
4700 							 inst_offset + reg] =
4701 							RREG32(SOC15_REG_ENTRY_OFFSET_INST(
4702 								gc_cp_reg_list_9_4_3[reg],
4703 								GET_INST(GC, xcc_id)));
4704 					}
4705 					inst_offset += reg_count;
4706 				}
4707 			}
4708 		}
4709 	}
4710 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4711 	mutex_unlock(&adev->srbm_mutex);
4712 }
4713 
4714 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
4715 {
4716 	/* Emit the cleaner shader */
4717 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
4718 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
4719 }
4720 
4721 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4722 	.name = "gfx_v9_4_3",
4723 	.early_init = gfx_v9_4_3_early_init,
4724 	.late_init = gfx_v9_4_3_late_init,
4725 	.sw_init = gfx_v9_4_3_sw_init,
4726 	.sw_fini = gfx_v9_4_3_sw_fini,
4727 	.hw_init = gfx_v9_4_3_hw_init,
4728 	.hw_fini = gfx_v9_4_3_hw_fini,
4729 	.suspend = gfx_v9_4_3_suspend,
4730 	.resume = gfx_v9_4_3_resume,
4731 	.is_idle = gfx_v9_4_3_is_idle,
4732 	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
4733 	.soft_reset = gfx_v9_4_3_soft_reset,
4734 	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4735 	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
4736 	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4737 	.dump_ip_state = gfx_v9_4_3_ip_dump,
4738 	.print_ip_state = gfx_v9_4_3_ip_print,
4739 };
4740 
4741 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4742 	.type = AMDGPU_RING_TYPE_COMPUTE,
4743 	.align_mask = 0xff,
4744 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4745 	.support_64bit_ptrs = true,
4746 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4747 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4748 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4749 	.emit_frame_size =
4750 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4751 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4752 		5 + /* hdp invalidate */
4753 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4754 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4755 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4756 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4757 		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4758 		7 + /* gfx_v9_4_3_emit_mem_sync */
4759 		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4760 		15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4761 		2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
4762 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4763 	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4764 	.emit_fence = gfx_v9_4_3_ring_emit_fence,
4765 	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4766 	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4767 	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4768 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4769 	.test_ring = gfx_v9_4_3_ring_test_ring,
4770 	.test_ib = gfx_v9_4_3_ring_test_ib,
4771 	.insert_nop = gfx_v9_4_3_ring_insert_nop,
4772 	.pad_ib = amdgpu_ring_generic_pad_ib,
4773 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4774 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4775 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4776 	.soft_recovery = gfx_v9_4_3_ring_soft_recovery,
4777 	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4778 	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4779 	.reset = gfx_v9_4_3_reset_kcq,
4780 	.emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
4781 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
4782 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
4783 };
4784 
4785 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4786 	.type = AMDGPU_RING_TYPE_KIQ,
4787 	.align_mask = 0xff,
4788 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4789 	.support_64bit_ptrs = true,
4790 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4791 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4792 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4793 	.emit_frame_size =
4794 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4795 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4796 		5 + /* hdp invalidate */
4797 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4798 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4799 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4800 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4801 		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4802 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4803 	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4804 	.test_ring = gfx_v9_4_3_ring_test_ring,
4805 	.insert_nop = amdgpu_ring_insert_nop,
4806 	.pad_ib = amdgpu_ring_generic_pad_ib,
4807 	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4808 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4809 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4810 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4811 };
4812 
4813 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4814 {
4815 	int i, j, num_xcc;
4816 
4817 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4818 	for (i = 0; i < num_xcc; i++) {
4819 		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4820 
4821 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
4822 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4823 					= &gfx_v9_4_3_ring_funcs_compute;
4824 	}
4825 }
4826 
4827 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4828 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4829 	.process = gfx_v9_4_3_eop_irq,
4830 };
4831 
4832 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4833 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4834 	.process = gfx_v9_4_3_priv_reg_irq,
4835 };
4836 
4837 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
4838 	.set = gfx_v9_4_3_set_bad_op_fault_state,
4839 	.process = gfx_v9_4_3_bad_op_irq,
4840 };
4841 
4842 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4843 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4844 	.process = gfx_v9_4_3_priv_inst_irq,
4845 };
4846 
4847 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4848 {
4849 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4850 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4851 
4852 	adev->gfx.priv_reg_irq.num_types = 1;
4853 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4854 
4855 	adev->gfx.bad_op_irq.num_types = 1;
4856 	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
4857 
4858 	adev->gfx.priv_inst_irq.num_types = 1;
4859 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4860 }
4861 
4862 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4863 {
4864 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4865 }
4866 
4867 
4868 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4869 {
4870 	/* init ASIC GDS info */
4871 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4872 	case IP_VERSION(9, 4, 3):
4873 	case IP_VERSION(9, 4, 4):
4874 	case IP_VERSION(9, 5, 0):
4875 		/* 9.4.3 removed all the GDS internal memory;
4876 		 * only GWS opcodes, such as barrier and
4877 		 * semaphore, are supported in the kernel. */
4878 		adev->gds.gds_size = 0;
4879 		break;
4880 	default:
4881 		adev->gds.gds_size = 0x10000;
4882 		break;
4883 	}
4884 
4885 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4886 	case IP_VERSION(9, 4, 3):
4887 	case IP_VERSION(9, 4, 4):
4888 	case IP_VERSION(9, 5, 0):
4889 		/* deprecated on 9.4.3, not used at all */
4890 		adev->gds.gds_compute_max_wave_id = 0;
4891 		break;
4892 	default:
4893 		/* this really depends on the chip */
4894 		adev->gds.gds_compute_max_wave_id = 0x7ff;
4895 		break;
4896 	}
4897 
4898 	adev->gds.gws_size = 64;
4899 	adev->gds.oa_size = 16;
4900 }
4901 
4902 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4903 						 u32 bitmap, int xcc_id)
4904 {
4905 	u32 data;
4906 
4907 	if (!bitmap)
4908 		return;
4909 
4910 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4911 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4912 
4913 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4914 }
4915 
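/*
 * Return the bitmap of active CUs in the currently selected SE/SH of
 * @xcc_id: the hardware and user harvesting masks are OR'ed together,
 * inverted and limited to max_cu_per_sh bits.
 */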
4916 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4917 {
4918 	u32 data, mask;
4919 
4920 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4921 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4922 
4923 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4924 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4925 
4926 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4927 
4928 	return (~data) & mask;
4929 }
4930 
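/*
 * Build the CU info table: per-XCC/SE/SH active-CU bitmaps, the total
 * active CU count and the always-on CU mask. If the CU count is
 * symmetric across the SEs of an XCC, harvesting relaunch/dispatch is
 * additionally disabled through CP_CPC_DEBUG.
 */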
4931 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4932 				 struct amdgpu_cu_info *cu_info)
4933 {
4934 	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
4935 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
4936 	unsigned disable_masks[4 * 4];
4937 	bool is_symmetric_cus;
4938 
4939 	if (!adev || !cu_info)
4940 		return -EINVAL;
4941 
4942 	/*
4943 	 * 16 comes from the bitmap array size 4*4, which covers all gfx9 ASICs
4944 	 */
4945 	if (adev->gfx.config.max_shader_engines *
4946 		adev->gfx.config.max_sh_per_se > 16)
4947 		return -EINVAL;
4948 
4949 	amdgpu_gfx_parse_disable_cu(disable_masks,
4950 				    adev->gfx.config.max_shader_engines,
4951 				    adev->gfx.config.max_sh_per_se);
4952 
4953 	mutex_lock(&adev->grbm_idx_mutex);
4954 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4955 		is_symmetric_cus = true;
4956 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4957 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4958 				mask = 1;
4959 				ao_bitmap = 0;
4960 				counter = 0;
4961 				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4962 				gfx_v9_4_3_set_user_cu_inactive_bitmap(
4963 					adev,
4964 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4965 					xcc_id);
4966 				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
4967 
4968 				cu_info->bitmap[xcc_id][i][j] = bitmap;
4969 
4970 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4971 					if (bitmap & mask) {
4972 						if (counter < adev->gfx.config.max_cu_per_sh)
4973 							ao_bitmap |= mask;
4974 						counter++;
4975 					}
4976 					mask <<= 1;
4977 				}
4978 				active_cu_number += counter;
4979 				if (i < 2 && j < 2)
4980 					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4981 				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4982 			}
4983 			if (i && is_symmetric_cus && prev_counter != counter)
4984 				is_symmetric_cus = false;
4985 			prev_counter = counter;
4986 		}
4987 		if (is_symmetric_cus) {
4988 			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
4989 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
4990 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
4991 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
4992 		}
4993 		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4994 					    xcc_id);
4995 	}
4996 	mutex_unlock(&adev->grbm_idx_mutex);
4997 
4998 	cu_info->number = active_cu_number;
4999 	cu_info->ao_cu_mask = ao_cu_mask;
5000 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5001 
5002 	return 0;
5003 }
5004 
5005 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
5006 	.type = AMD_IP_BLOCK_TYPE_GFX,
5007 	.major = 9,
5008 	.minor = 4,
5009 	.rev = 3,
5010 	.funcs = &gfx_v9_4_3_ip_funcs,
5011 };
5012 
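/*
 * Bring up the XCC instances selected by @inst_mask for a compute
 * partition: constants first, then RLC (skipped under SR-IOV) and
 * finally CP on each instance.
 */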
5013 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
5014 {
5015 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5016 	uint32_t tmp_mask;
5017 	int i, r;
5018 
5019 	/* TODO: Initialize golden regs */
5020 	/* gfx_v9_4_3_init_golden_registers(adev); */
5021 
5022 	tmp_mask = inst_mask;
5023 	for_each_inst(i, tmp_mask)
5024 		gfx_v9_4_3_xcc_constants_init(adev, i);
5025 
5026 	if (!amdgpu_sriov_vf(adev)) {
5027 		tmp_mask = inst_mask;
5028 		for_each_inst(i, tmp_mask) {
5029 			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
5030 			if (r)
5031 				return r;
5032 		}
5033 	}
5034 
5035 	tmp_mask = inst_mask;
5036 	for_each_inst(i, tmp_mask) {
5037 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
5038 		if (r)
5039 			return r;
5040 	}
5041 
5042 	return 0;
5043 }
5044 
5045 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
5046 {
5047 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5048 	int i;
5049 
5050 	for_each_inst(i, inst_mask)
5051 		gfx_v9_4_3_xcc_fini(adev, i);
5052 
5053 	return 0;
5054 }
5055 
5056 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
5057 	.suspend = &gfx_v9_4_3_xcp_suspend,
5058 	.resume = &gfx_v9_4_3_xcp_resume
5059 };
5060 
5061 struct amdgpu_ras_block_hw_ops  gfx_v9_4_3_ras_ops = {
5062 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
5063 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
5064 };
5065 
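/*
 * Standard RAS block late init, followed by binding the GFX block to
 * the ACA framework; the late init is unwound if the bind fails.
 */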
5066 static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
5067 {
5068 	int r;
5069 
5070 	r = amdgpu_ras_block_late_init(adev, ras_block);
5071 	if (r)
5072 		return r;
5073 
5074 	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
5075 				&gfx_v9_4_3_aca_info,
5076 				NULL);
5077 	if (r)
5078 		goto late_fini;
5079 
5080 	return 0;
5081 
5082 late_fini:
5083 	amdgpu_ras_block_late_fini(adev, ras_block);
5084 
5085 	return r;
5086 }
5087 
5088 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
5089 	.ras_block = {
5090 		.hw_ops = &gfx_v9_4_3_ras_ops,
5091 		.ras_late_init = &gfx_v9_4_3_ras_late_init,
5092 	},
5093 	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
5094 };
5095