/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define XCC_REG_RANGE_0_LOW  0x2000     /* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3400     /* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper bound */

#define NORMALIZE_XCC_REG_OFFSET(offset) \
	(offset & 0xFFFF)
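
/*
 * Illustrative example (not part of the driver): with a hypothetical
 * absolute offset of 0x1A010, NORMALIZE_XCC_REG_OFFSET() yields 0xA010,
 * which falls inside the gfxdec1 window [0xA000, 0x10000), so the XCC-local
 * offset is used; an offset whose masked value lands outside both windows
 * is returned unchanged by gfx_v9_4_3_normalize_xcc_reg_offset() below.
 */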

static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
	/* listed eight times on purpose: each read of the header dump
	 * register is expected to return the next MEC ME1 header dump dword
	 */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask: 0, queue_type: 0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
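
/*
 * Reading of the packet above (a sketch based on the fields written here,
 * not a quote of the PM4 spec): SET_RESOURCES hands the KIQ the 64-bit
 * bitmap of compute queue slots it may schedule, plus the cleaner shader
 * address that the MEC can run between workloads. A queue_mask of 0x3, for
 * example, would make only the first two queue slots eligible for mapping.
 */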

static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
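
/*
 * Summary of the payload above (inferred from this function, hedged where
 * the packet spec is not quoted): MAP_QUEUES carries the doorbell offset
 * the queue will ring, the GPU address of the MQD holding the full queue
 * state, and the write-pointer polling address in the writeback buffer,
 * i.e. adev->wb.gpu_addr + ring->wptr_offs * 4 as computed above.
 */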

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* action, queue_sel: 0, eng_sel, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* doorbell offset and engine select */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
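
/*
 * Usage note (illustrative): this KIQ TLB invalidation is PASID-based, so a
 * flush on behalf of a process looks roughly like
 * gfx_v9_4_3_kiq_invalidate_tlbs(kiq_ring, pasid, 0, true), where all_hub
 * extends the flush to every MMU hub and flush_type selects heavier flush
 * variants.
 */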

static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					  uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait until the dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait for HQD deactivation\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
	}
}

static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/*
	 * If it is an XCC register, normalize the offset so that only the
	 * lower 16 bits remain, i.e. the offset local to the XCC.
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
		((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	/* Only do the normalization on register space */
	if (mem_space == 0) {
		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
				 /* memory (1) or register (0) */
				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
				 WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use a register offset which is local to the XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}
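
/*
 * How the test above works: SCRATCH_REG0 is seeded with 0xCAFEDEAD via MMIO,
 * then a SET_UCONFIG_REG packet asking the CP to store 0xDEADBEEF there is
 * committed. The packet can only encode the XCC-local offset, while the MMIO
 * readback must use the per-instance offset; observing 0xDEADBEEF within
 * adev->usec_timeout microseconds proves the CP fetched and executed the
 * ring contents.
 */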

static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}
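
/*
 * Note on the sequence above (inferred from the registers involved): the
 * write to RLC_CAPTURE_GPU_CLOCK_COUNT appears to latch the free-running
 * counter so the following LSB/MSB reads form one consistent 64-bit sample;
 * the mutex keeps concurrent callers from interleaving capture/read pairs.
 */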

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	int err;

	if (amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_sjt_mec.bin", chip_name);

		if (err)
			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
							AMDGPU_UCODE_REQUIRED,
							"amdgpu/%s_mec.bin", chip_name);
	} else {
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_mec.bin", chip_name);
	}
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	u32 xcp_ctl;

	/* Value is expected to be the same on all XCCs; fetch from the first instance */
	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}

static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
						int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
						    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}
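
/*
 * Worked example for the mapping above (hypothetical mask): with
 * adev->gfx.xcc_mask = 0b1011 and ih_node = 4, GENMASK(4 / 2, 0) = 0b111,
 * so hweight8(0b1011 & 0b0111) = 2 and the function returns logical XCC 1.
 * Two IH nodes map to each XCC, hence the division by two before counting
 * the populated XCC bits at or below that position.
 */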

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
	.get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
};

static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
				      struct aca_bank *bank, enum aca_smu_type type,
				      void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	u32 instlo;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* NOTE: overwrite info.die_id with the XCD id for gfx */
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];

	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	adev->gfx.config.max_hw_contexts = 8;
	adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
	adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
	adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
	adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
	adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				        int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
				     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
			ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}
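
/*
 * Doorbell math above, as an example with made-up numbers: assuming
 * mec_ring0 = 0x10 and an xcc_doorbell_range of 0x20, ring 2 on XCC1 gets
 * index (0x10 + 0x20 + 2) << 1 = 0x64. The shift by one reflects that
 * ring->doorbell_index is kept in 32-bit dword units while each queue
 * occupies one 64-bit doorbell slot.
 */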

static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
	uint32_t *ptr, num_xcc, inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}
}

static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 153) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
								       ring_id,
								       xcc_id,
								       i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
				sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	adev->gfx.compute_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		if ((adev->gfx.mec_fw_version >= 155) &&
		    !amdgpu_sriov_vf(adev) &&
		    !adev->debug_disable_gpu_ring_reset) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	case IP_VERSION(9, 5, 0):
		if ((adev->gfx.mec_fw_version >= 21) &&
		    !amdgpu_sriov_vf(adev) &&
		    !adev->debug_disable_gpu_ring_reset) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	default:
		break;
	}
	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	gfx_v9_4_3_alloc_ip_dump(adev);

	return 0;
}

static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	amdgpu_gfx_cleaner_shader_sw_fini(adev);

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	amdgpu_gfx_sysfs_fini(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}
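
/*
 * Aperture math, following from DEFAULT_SH_MEM_BASES above: SH_MEM_BASES
 * holds 16-bit bases that the hardware shifts left by 48, so 0x6000 places
 * both the private and shared apertures at 0x6000'0000'0000'0000, matching
 * the LDS/Scratch/GPUVM layout in the comment at the top of this function.
 */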

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

/* For ASICs that need the xnack chain and whose MEC firmware supports it,
 * set the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform
 * KFD to set the xnack_chain bit in SET_RESOURCES
 */
static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
	data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1, data);
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	/* ToDo: GC 9.4.4 */
	case IP_VERSION(9, 4, 3):
		if (adev->gfx.mec_fw_version >= 184 &&
		    (amdgpu_sriov_reg_access_sq_config(adev) ||
		     !amdgpu_sriov_vf(adev)))
			adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	case IP_VERSION(9, 5, 0):
		if (adev->gfx.mec_fw_version >= 23)
			adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	default:
		break;
	}

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list is usable since v2_1.
	 */
	gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}
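
/*
 * The handshake above: write CMD together with MESSAGE = 1 (enter safe
 * mode) into RLC_SAFE_MODE, then poll until the RLC firmware clears the CMD
 * field as its acknowledgment. gfx_v9_4_3_xcc_unset_safe_mode() below sends
 * the same CMD with MESSAGE = 0 to leave safe mode, without polling.
 */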
1426 
1427 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
1428 					   int xcc_id)
1429 {
1430 	uint32_t data;
1431 
1432 	data = RLC_SAFE_MODE__CMD_MASK;
1433 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1434 }
1435 
1436 static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1437 {
1438 	int xcc_id, num_xcc;
1439 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1440 
1441 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1442 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1443 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
1444 		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
1445 		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
1446 		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
1447 		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
1448 		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
1449 		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
1450 		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
1451 	}
1452 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1453 }
1454 
1455 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
1456 {
1457 	/* init spm vmid with 0xf */
1458 	if (adev->gfx.rlc.funcs->update_spm_vmid)
1459 		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0, NULL, 0xf);
1460 
1461 	return 0;
1462 }
1463 
1464 static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
1465 					       int xcc_id)
1466 {
1467 	u32 i, j, k;
1468 	u32 mask;
1469 
1470 	mutex_lock(&adev->grbm_idx_mutex);
1471 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1472 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1473 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
1474 						    xcc_id);
1475 			for (k = 0; k < adev->usec_timeout; k++) {
1476 				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
1477 					break;
1478 				udelay(1);
1479 			}
1480 			if (k == adev->usec_timeout) {
1481 				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
1482 							    0xffffffff,
1483 							    0xffffffff, xcc_id);
1484 				mutex_unlock(&adev->grbm_idx_mutex);
1485 				drm_info(adev_to_drm(adev), "Timeout wait for RLC serdes %u,%u\n",
1486 					 i, j);
1487 				return;
1488 			}
1489 		}
1490 	}
1491 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
1492 				    xcc_id);
1493 	mutex_unlock(&adev->grbm_idx_mutex);
1494 
1495 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1496 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1497 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1498 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1499 	for (k = 0; k < adev->usec_timeout; k++) {
1500 		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1501 			break;
1502 		udelay(1);
1503 	}
1504 }
1505 
1506 static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1507 						     bool enable, int xcc_id)
1508 {
1509 	u32 tmp;
1510 
1511 	/* These interrupts should be enabled to drive DS clock */
1512 
1513 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1514 
1515 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1516 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1517 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1518 
1519 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1520 }
1521 
1522 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1523 {
1524 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1525 			      RLC_ENABLE_F32, 0);
1526 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1527 	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1528 }
1529 
1530 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1531 {
1532 	int i, num_xcc;
1533 
1534 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1535 	for (i = 0; i < num_xcc; i++)
1536 		gfx_v9_4_3_xcc_rlc_stop(adev, i);
1537 }
1538 
1539 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1540 {
1541 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1542 			      SOFT_RESET_RLC, 1);
1543 	udelay(50);
1544 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1545 			      SOFT_RESET_RLC, 0);
1546 	udelay(50);
1547 }
1548 
1549 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1550 {
1551 	int i, num_xcc;
1552 
1553 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1554 	for (i = 0; i < num_xcc; i++)
1555 		gfx_v9_4_3_xcc_rlc_reset(adev, i);
1556 }
1557 
1558 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1559 {
1560 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1561 			      RLC_ENABLE_F32, 1);
1562 	udelay(50);
1563 
1564 	/* on APUs (carrizo) the CP interrupt is enabled only after the CP is initialized */
1565 	if (!(adev->flags & AMD_IS_APU)) {
1566 		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1567 		udelay(50);
1568 	}
1569 }
1570 
1571 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1572 {
1573 #ifdef AMDGPU_RLC_DEBUG_RETRY
1574 	u32 rlc_ucode_ver;
1575 #endif
1576 	int i, num_xcc;
1577 
1578 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1579 	for (i = 0; i < num_xcc; i++) {
1580 		gfx_v9_4_3_xcc_rlc_start(adev, i);
1581 #ifdef AMDGPU_RLC_DEBUG_RETRY
1582 		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
1583 		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1584 		if (rlc_ucode_ver == 0x108) {
1585 			dev_info(adev->dev,
1586 				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
1587 				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1588 			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1589 			 * default is 0x9C4 to create a 100us interval */
1590 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1591 			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1592 			 * to disable the page fault retry interrupts, default is
1593 			 * 0x100 (256) */
1594 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1595 		}
1596 #endif
1597 	}
1598 }
1599 
1600 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1601 					     int xcc_id)
1602 {
1603 	const struct rlc_firmware_header_v2_0 *hdr;
1604 	const __le32 *fw_data;
1605 	unsigned i, fw_size;
1606 
1607 	if (!adev->gfx.rlc_fw)
1608 		return -EINVAL;
1609 
1610 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1611 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1612 
1613 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1614 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1615 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1616 
1617 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1618 			RLCG_UCODE_LOADING_START_ADDRESS);
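	/*
	 * The GPM ucode is streamed through an ADDR/DATA register pair:
	 * program the start address once, write each dword to UCODE_DATA
	 * (the hardware presumably auto-increments the address), then store
	 * the firmware version in UCODE_ADDR afterwards, as the other amdgpu
	 * RLC loaders do.
	 */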
1619 	for (i = 0; i < fw_size; i++) {
1620 		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1621 			dev_info(adev->dev, "RLC ucode load progress: %u DWs written\n", i);
1622 			msleep(1);
1623 		}
1624 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1625 	}
1626 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1627 
1628 	return 0;
1629 }
1630 
1631 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1632 {
1633 	int r;
1634 
1635 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1636 		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1637 		/* legacy rlc firmware loading */
1638 		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1639 		if (r)
1640 			return r;
1641 		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1642 	}
1643 
1644 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1645 	/* disable CG */
1646 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1647 	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1648 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1649 
1650 	return 0;
1651 }
1652 
1653 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1654 {
1655 	int r, i, num_xcc;
1656 
1657 	if (amdgpu_sriov_vf(adev))
1658 		return 0;
1659 
1660 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1661 	for (i = 0; i < num_xcc; i++) {
1662 		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1663 		if (r)
1664 			return r;
1665 	}
1666 
1667 	return 0;
1668 }
1669 
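/*
 * Select the VMID whose memory traffic the RLC streaming performance
 * monitor (SPM) is tagged with.  In SRIOV one-VF mode the register is
 * accessed directly (NO_KIQ) since no KIQ round trip is needed; in the
 * other modes the access goes through the regular (possibly RLCG-assisted)
 * path.
 */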
1670 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
1671 					      int inst, struct amdgpu_ring *ring, unsigned int vmid)
1672 {
1673 	u32 reg, pre_data, data;
1674 
1675 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regRLC_SPM_MC_CNTL);
1676 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
1677 		pre_data = RREG32_NO_KIQ(reg);
1678 	else
1679 		pre_data = RREG32(reg);
1680 
1681 	data = pre_data & ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
1682 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
1683 
1684 	if (pre_data != data) {
1685 		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
1686 			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, inst), regRLC_SPM_MC_CNTL, data);
1687 		else
1688 			WREG32_SOC15(GC, GET_INST(GC, inst), regRLC_SPM_MC_CNTL, data);
1689 	}
1690 }
1691 
1692 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1693 	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1694 	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1695 };
1696 
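/*
 * Return true if @offset matches one of the RLCG allow-list entries,
 * translating each entry's logical GC instance to its device instance
 * when an ip_map is registered.
 */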
1697 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1698 					uint32_t offset,
1699 					struct soc15_reg_rlcg *entries, int arr_size)
1700 {
1701 	int i, inst;
1702 	uint32_t reg;
1703 
1704 	if (!entries)
1705 		return false;
1706 
1707 	for (i = 0; i < arr_size; i++) {
1708 		const struct soc15_reg_rlcg *entry;
1709 
1710 		entry = &entries[i];
1711 		inst = adev->ip_map.logical_to_dev_inst ?
1712 			       adev->ip_map.logical_to_dev_inst(
1713 				       adev, entry->hwip, entry->instance) :
1714 			       entry->instance;
1715 		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1716 		      entry->reg;
1717 		if (offset == reg)
1718 			return true;
1719 	}
1720 
1721 	return false;
1722 }
1723 
1724 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1725 {
1726 	return gfx_v9_4_3_check_rlcg_range(adev, offset,
1727 					(void *)rlcg_access_gc_9_4_3,
1728 					ARRAY_SIZE(rlcg_access_gc_9_4_3));
1729 }
1730 
1731 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1732 					     bool enable, int xcc_id)
1733 {
1734 	if (enable) {
1735 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1736 	} else {
1737 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1738 			(CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
1739 			 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
1740 			 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
1741 			 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
1742 			 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
1743 			 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
1744 			 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
1745 			 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
1746 			 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1747 		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1748 	}
1749 	udelay(50);
1750 }
1751 
1752 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1753 						    int xcc_id)
1754 {
1755 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1756 	const __le32 *fw_data;
1757 	unsigned i;
1758 	u32 tmp;
1759 	u32 mec_ucode_addr_offset;
1760 	u32 mec_ucode_data_offset;
1761 
1762 	if (!adev->gfx.mec_fw)
1763 		return -EINVAL;
1764 
1765 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1766 
1767 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1768 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1769 
1770 	fw_data = (const __le32 *)
1771 		(adev->gfx.mec_fw->data +
1772 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1773 	tmp = 0;
1774 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1775 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1776 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1777 
1778 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1779 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1780 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1781 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1782 
1783 	mec_ucode_addr_offset =
1784 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1785 	mec_ucode_data_offset =
1786 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1787 
1788 	/* MEC1 */
1789 	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1790 	for (i = 0; i < mec_hdr->jt_size; i++)
1791 		WREG32(mec_ucode_data_offset,
1792 		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
1793 
1794 	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
1795 	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
1796 
1797 	return 0;
1798 }
1799 
1800 /* KIQ functions */
1801 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1802 {
1803 	uint32_t tmp;
1804 	struct amdgpu_device *adev = ring->adev;
1805 
1806 	/* tell the RLC which queue is the KIQ */
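	/* The low byte of RLC_CP_SCHEDULERS appears to encode the KIQ slot:
	 * queue in bits [2:0], pipe in bits [4:3], me in bits [6:5], with
	 * bit 7 (the 0x80 OR-ed in below) presumably marking the entry valid.
	 */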
1807 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1808 	tmp &= 0xffffff00;
1809 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1810 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
1811 }
1812 
1813 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1814 {
1815 	struct amdgpu_device *adev = ring->adev;
1816 
1817 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1818 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1819 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1820 			mqd->cp_hqd_queue_priority =
1821 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1822 		}
1823 	}
1824 }
1825 
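/*
 * Fill in the Memory Queue Descriptor (MQD) for a compute or KIQ ring.
 * The MQD is the in-memory image of the CP_HQD_* register state; the CP
 * loads it when the queue is mapped, so the values written here mirror
 * what gfx_v9_4_3_xcc_kiq_init_register() programs by hand for the KIQ.
 */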
1826 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1827 {
1828 	struct amdgpu_device *adev = ring->adev;
1829 	struct v9_mqd *mqd = ring->mqd_ptr;
1830 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1831 	uint32_t tmp;
1832 
1833 	mqd->header = 0xC0310800;
1834 	mqd->compute_pipelinestat_enable = 0x00000001;
1835 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1836 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1837 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1838 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1839 	mqd->compute_misc_reserved = 0x00000003;
1840 
1841 	mqd->dynamic_cu_mask_addr_lo =
1842 		lower_32_bits(ring->mqd_gpu_addr
1843 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1844 	mqd->dynamic_cu_mask_addr_hi =
1845 		upper_32_bits(ring->mqd_gpu_addr
1846 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1847 
1848 	eop_base_addr = ring->eop_gpu_addr >> 8;
1849 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1850 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1851 
1852 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1853 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1854 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1855 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
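	/* Worked example: GFX9_MEC_HPD_SIZE is 4096 bytes = 1024 dwords, so
	 * order_base_2(1024) - 1 = 9 and the hw expands it back to
	 * 2^(9+1) = 1024 dwords.
	 */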
1856 
1857 	mqd->cp_hqd_eop_control = tmp;
1858 
1859 	/* enable doorbell? */
1860 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1861 
1862 	if (ring->use_doorbell) {
1863 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1864 				    DOORBELL_OFFSET, ring->doorbell_index);
1865 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1866 				    DOORBELL_EN, 1);
1867 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1868 				    DOORBELL_SOURCE, 0);
1869 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1870 				    DOORBELL_HIT, 0);
1871 		if (amdgpu_sriov_multi_vf_mode(adev))
1872 			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1873 					    DOORBELL_MODE, 1);
1874 	} else {
1875 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1876 					 DOORBELL_EN, 0);
1877 	}
1878 
1879 	mqd->cp_hqd_pq_doorbell_control = tmp;
1880 
1881 	/* disable the queue if it's active */
1882 	ring->wptr = 0;
1883 	mqd->cp_hqd_dequeue_request = 0;
1884 	mqd->cp_hqd_pq_rptr = 0;
1885 	mqd->cp_hqd_pq_wptr_lo = 0;
1886 	mqd->cp_hqd_pq_wptr_hi = 0;
1887 
1888 	/* set the pointer to the MQD */
1889 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1890 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1891 
1892 	/* set MQD vmid to 0 */
1893 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1894 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1895 	mqd->cp_mqd_control = tmp;
1896 
1897 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1898 	hqd_gpu_addr = ring->gpu_addr >> 8;
1899 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1900 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1901 
1902 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1903 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1904 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1905 			    (order_base_2(ring->ring_size / 4) - 1));
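	/* QUEUE_SIZE encodes log2 of the ring size in dwords, minus 1; e.g.
	 * a 256KB ring is 64K dwords, giving a field value of 16 - 1 = 15.
	 */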
1906 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1907 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
1908 #ifdef __BIG_ENDIAN
1909 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1910 #endif
1911 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1912 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1913 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1914 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1915 	mqd->cp_hqd_pq_control = tmp;
1916 
1917 	/* set the wb address whether it's enabled or not */
1918 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1919 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1920 	mqd->cp_hqd_pq_rptr_report_addr_hi =
1921 		upper_32_bits(wb_gpu_addr) & 0xffff;
1922 
1923 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1924 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1925 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1926 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1927 
1928 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1929 	ring->wptr = 0;
1930 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1931 
1932 	/* set the vmid for the queue */
1933 	mqd->cp_hqd_vmid = 0;
1934 
1935 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1936 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1937 	mqd->cp_hqd_persistent_state = tmp;
1938 
1939 	/* set MIN_IB_AVAIL_SIZE */
1940 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1941 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1942 	mqd->cp_hqd_ib_control = tmp;
1943 
1944 	/* set static priority for a queue/ring */
1945 	gfx_v9_4_3_mqd_set_priority(ring, mqd);
1946 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1947 
1948 	/* the map_queues packet doesn't need to activate the queue,
1949 	 * so only the KIQ needs to set this field.
1950 	 */
1951 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1952 		mqd->cp_hqd_active = 1;
1953 
1954 	return 0;
1955 }
1956 
1957 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1958 					    int xcc_id)
1959 {
1960 	struct amdgpu_device *adev = ring->adev;
1961 	struct v9_mqd *mqd = ring->mqd_ptr;
1962 	int j;
1963 
1964 	/* disable wptr polling */
1965 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1966 
1967 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1968 	       mqd->cp_hqd_eop_base_addr_lo);
1969 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1970 	       mqd->cp_hqd_eop_base_addr_hi);
1971 
1972 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1973 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1974 	       mqd->cp_hqd_eop_control);
1975 
1976 	/* enable doorbell? */
1977 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1978 	       mqd->cp_hqd_pq_doorbell_control);
1979 
1980 	/* disable the queue if it's active */
1981 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1982 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1983 		for (j = 0; j < adev->usec_timeout; j++) {
1984 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1985 				break;
1986 			udelay(1);
1987 		}
1988 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1989 		       mqd->cp_hqd_dequeue_request);
1990 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1991 		       mqd->cp_hqd_pq_rptr);
1992 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1993 		       mqd->cp_hqd_pq_wptr_lo);
1994 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1995 		       mqd->cp_hqd_pq_wptr_hi);
1996 	}
1997 
1998 	/* set the pointer to the MQD */
1999 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
2000 	       mqd->cp_mqd_base_addr_lo);
2001 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
2002 	       mqd->cp_mqd_base_addr_hi);
2003 
2004 	/* set MQD vmid to 0 */
2005 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2006 	       mqd->cp_mqd_control);
2007 
2008 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2009 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2010 	       mqd->cp_hqd_pq_base_lo);
2011 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2012 	       mqd->cp_hqd_pq_base_hi);
2013 
2014 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2015 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2016 	       mqd->cp_hqd_pq_control);
2017 
2018 	/* set the wb address whether it's enabled or not */
2019 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2020 				mqd->cp_hqd_pq_rptr_report_addr_lo);
2021 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2022 				mqd->cp_hqd_pq_rptr_report_addr_hi);
2023 
2024 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2025 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2026 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2027 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2028 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2029 
2030 	/* enable the doorbell if requested */
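	/* Each XCC gets its own doorbell window: the KIQ and user-queue
	 * indices are offset by xcc_id * xcc_doorbell_range before being
	 * scaled (* 2, << 2) from doorbell-index units into the byte-based
	 * encoding the MEC range registers take (the exact unit conversion
	 * is an assumption based on how these indices are used elsewhere in
	 * amdgpu).
	 */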
2031 	if (ring->use_doorbell) {
2032 		WREG32_SOC15(
2033 			GC, GET_INST(GC, xcc_id),
2034 			regCP_MEC_DOORBELL_RANGE_LOWER,
2035 			((adev->doorbell_index.kiq +
2036 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2037 			 2) << 2);
2038 		WREG32_SOC15(
2039 			GC, GET_INST(GC, xcc_id),
2040 			regCP_MEC_DOORBELL_RANGE_UPPER,
2041 			((adev->doorbell_index.userqueue_end +
2042 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2043 			 2) << 2);
2044 	}
2045 
2046 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2047 	       mqd->cp_hqd_pq_doorbell_control);
2048 
2049 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2050 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2051 	       mqd->cp_hqd_pq_wptr_lo);
2052 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2053 	       mqd->cp_hqd_pq_wptr_hi);
2054 
2055 	/* set the vmid for the queue */
2056 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2057 
2058 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2059 	       mqd->cp_hqd_persistent_state);
2060 
2061 	/* activate the queue */
2062 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2063 	       mqd->cp_hqd_active);
2064 
2065 	if (ring->use_doorbell)
2066 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2067 
2068 	return 0;
2069 }
2070 
2071 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
2072 					    int xcc_id)
2073 {
2074 	struct amdgpu_device *adev = ring->adev;
2075 	int j;
2076 
2077 	/* disable the queue if it's active */
2078 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2079 
2080 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2081 
2082 		for (j = 0; j < adev->usec_timeout; j++) {
2083 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2084 				break;
2085 			udelay(1);
2086 		}
2087 
2088 		if (j == adev->usec_timeout) {
2089 			DRM_DEBUG("%s dequeue request failed.\n", ring->name);
2090 
2091 			/* Manual disable if dequeue request times out */
2092 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2093 		}
2094 
2095 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2096 		      0);
2097 	}
2098 
2099 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
2100 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
2101 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
2102 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2103 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
2104 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
2105 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
2106 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
2107 
2108 	return 0;
2109 }
2110 
2111 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
2112 {
2113 	struct amdgpu_device *adev = ring->adev;
2114 	struct v9_mqd *mqd = ring->mqd_ptr;
2115 	struct v9_mqd *tmp_mqd;
2116 
2117 	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
2118 
2119 	/* The GPU could be in a bad state during probe: the driver may trigger
2120 	 * a reset after loading the SMU, in which case the MQD has not been
2121 	 * initialized yet and needs to be re-initialized here.
2122 	 * Check mqd->cp_hqd_pq_control, since this value should not be 0.
2123 	 */
2124 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
2125 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
2126 		/* for GPU_RESET case , reset MQD to a clean status */
2127 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2128 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
2129 
2130 		/* reset ring buffer */
2131 		ring->wptr = 0;
2132 		amdgpu_ring_clear_ring(ring);
2133 		mutex_lock(&adev->srbm_mutex);
2134 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2135 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2136 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2137 		mutex_unlock(&adev->srbm_mutex);
2138 	} else {
2139 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2140 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2141 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2142 		mutex_lock(&adev->srbm_mutex);
2143 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2144 			amdgpu_ring_clear_ring(ring);
2145 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2146 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2147 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2148 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2149 		mutex_unlock(&adev->srbm_mutex);
2150 
2151 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2152 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
2153 	}
2154 
2155 	return 0;
2156 }
2157 
2158 static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id,
2159 					  bool restore)
2160 {
2161 	struct amdgpu_device *adev = ring->adev;
2162 	struct v9_mqd *mqd = ring->mqd_ptr;
2163 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2164 	struct v9_mqd *tmp_mqd;
2165 
2166 	/* Same as the KIQ init above: the driver needs to re-init the MQD if
2167 	 * mqd->cp_hqd_pq_control has not been initialized before.
2168 	 */
2169 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
2170 
2171 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
2172 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
2173 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2174 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2175 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2176 		mutex_lock(&adev->srbm_mutex);
2177 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2178 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2179 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2180 		mutex_unlock(&adev->srbm_mutex);
2181 
2182 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2183 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2184 	} else {
2185 		/* restore MQD to a clean status */
2186 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2187 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2188 		/* reset ring buffer */
2189 		ring->wptr = 0;
2190 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
2191 		amdgpu_ring_clear_ring(ring);
2192 	}
2193 }
2194 
2195 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
2196 {
2197 	struct amdgpu_ring *ring;
2198 	int j;
2199 
2200 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2201 		ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
2202 		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2203 			mutex_lock(&adev->srbm_mutex);
2204 			soc15_grbm_select(adev, ring->me,
2205 					ring->pipe,
2206 					ring->queue, 0, GET_INST(GC, xcc_id));
2207 			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
2208 			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2209 			mutex_unlock(&adev->srbm_mutex);
2210 		}
2211 	}
2212 
2213 	return 0;
2214 }
2215 
2216 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
2217 {
2218 	gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id);
2219 	return 0;
2220 }
2221 
2222 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
2223 {
2224 	struct amdgpu_ring *ring;
2225 	int i;
2226 
2227 	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
2228 
2229 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2230 		ring = &adev->gfx.compute_ring[i + xcc_id *
2231 			adev->gfx.num_compute_rings];
2232 
2233 		gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
2234 	}
2235 
2236 	return amdgpu_gfx_enable_kcq(adev, xcc_id);
2237 }
2238 
2239 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
2240 {
2241 	struct amdgpu_ring *ring;
2242 	int r, j;
2243 
2244 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2245 
2246 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2247 		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
2248 
2249 		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
2250 		if (r)
2251 			return r;
2252 	} else {
2253 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2254 	}
2255 
2256 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
2257 	if (r)
2258 		return r;
2259 
2260 	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
2261 	if (r)
2262 		return r;
2263 
2264 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2265 		ring = &adev->gfx.compute_ring
2266 				[j + xcc_id * adev->gfx.num_compute_rings];
2267 		r = amdgpu_ring_test_helper(ring);
2268 		if (r)
2269 			return r;
2270 	}
2271 
2272 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2273 
2274 	return 0;
2275 }
2276 
2277 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2278 {
2279 	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
2280 
2281 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
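	/* Under SRIOV the partition mode is owned by the host, so only query
	 * it and initialize the XCP bookkeeping.  On bare metal, restore the
	 * pre-suspend mode on resume, or switch to the user-requested mode if
	 * no mode has been programmed yet.
	 */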
2282 	if (amdgpu_sriov_vf(adev)) {
2283 		enum amdgpu_gfx_partition mode;
2284 
2285 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2286 						       AMDGPU_XCP_FL_NONE);
2287 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2288 			return -EINVAL;
2289 		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
2290 		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2291 		num_xcp = num_xcc / num_xcc_per_xcp;
2292 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2293 
2294 	} else {
2295 		if (adev->in_suspend)
2296 			amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
2297 		else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2298 						    AMDGPU_XCP_FL_NONE) ==
2299 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2300 			r = amdgpu_xcp_switch_partition_mode(
2301 				adev->xcp_mgr, amdgpu_user_partt_mode);
2302 	}
2303 	if (r)
2304 		return r;
2305 
2306 	for (i = 0; i < num_xcc; i++) {
2307 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2308 		if (r)
2309 			return r;
2310 	}
2311 
2312 	return 0;
2313 }
2314 
2315 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2316 {
2317 	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2318 		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2319 
2320 	if (amdgpu_sriov_vf(adev)) {
2321 		/* Polling must be disabled for SRIOV once the hw is finished;
2322 		 * otherwise the CPC engine may keep fetching a WB address that
2323 		 * is already invalid after the sw teardown and trigger a DMAR
2324 		 * read error on the hypervisor side.
2325 		 */
2326 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2327 		return;
2328 	}
2329 
2330 	/* Use the deinitialize sequence from CAIL when unbinding the device
2331 	 * from the driver; otherwise the KIQ hangs when binding it back.
2332 	 */
2333 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2334 		mutex_lock(&adev->srbm_mutex);
2335 		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2336 				  adev->gfx.kiq[xcc_id].ring.pipe,
2337 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
2338 				  GET_INST(GC, xcc_id));
2339 		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2340 						 xcc_id);
2341 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2342 		mutex_unlock(&adev->srbm_mutex);
2343 	}
2344 
2345 	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2346 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2347 }
2348 
2349 static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
2350 {
2351 	int r;
2352 	struct amdgpu_device *adev = ip_block->adev;
2353 
2354 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
2355 				       adev->gfx.cleaner_shader_ptr);
2356 
2357 	if (!amdgpu_sriov_vf(adev))
2358 		gfx_v9_4_3_init_golden_registers(adev);
2359 
2360 	gfx_v9_4_3_constants_init(adev);
2361 
2362 	r = adev->gfx.rlc.funcs->resume(adev);
2363 	if (r)
2364 		return r;
2365 
2366 	return gfx_v9_4_3_cp_resume(adev);
2371 }
2372 
2373 static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
2374 {
2375 	struct amdgpu_device *adev = ip_block->adev;
2376 	int i, num_xcc;
2377 
2378 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2379 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2380 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
2381 
2382 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2383 	for (i = 0; i < num_xcc; i++)
2384 		gfx_v9_4_3_xcc_fini(adev, i);
2386 
2387 	return 0;
2388 }
2389 
2390 static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
2391 {
2392 	return gfx_v9_4_3_hw_fini(ip_block);
2393 }
2394 
2395 static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
2396 {
2397 	return gfx_v9_4_3_hw_init(ip_block);
2398 }
2399 
2400 static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block)
2401 {
2402 	struct amdgpu_device *adev = ip_block->adev;
2403 	int i, num_xcc;
2404 
2405 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2406 	for (i = 0; i < num_xcc; i++) {
2407 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2408 					GRBM_STATUS, GUI_ACTIVE))
2409 			return false;
2410 	}
2411 	return true;
2412 }
2413 
2414 static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
2415 {
2416 	unsigned i;
2417 	struct amdgpu_device *adev = ip_block->adev;
2418 
2419 	for (i = 0; i < adev->usec_timeout; i++) {
2420 		if (gfx_v9_4_3_is_idle(ip_block))
2421 			return 0;
2422 		udelay(1);
2423 	}
2424 	return -ETIMEDOUT;
2425 }
2426 
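/*
 * Soft reset: derive the SOFT_RESET_* request bits from the GRBM busy
 * status, quiesce the RLC and MEC first, then pulse GRBM_SOFT_RESET
 * (assert, wait ~50us, de-assert, settle).  Note that this path currently
 * only looks at XCC instance 0.
 */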
2427 static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
2428 {
2429 	u32 grbm_soft_reset = 0;
2430 	u32 tmp;
2431 	struct amdgpu_device *adev = ip_block->adev;
2432 
2433 	/* GRBM_STATUS */
2434 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2435 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2436 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2437 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2438 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2439 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2440 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2441 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2442 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2443 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2444 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2445 	}
2446 
2447 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2448 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2449 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2450 	}
2451 
2452 	/* GRBM_STATUS2 */
2453 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2454 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2455 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2456 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2457 
2459 	if (grbm_soft_reset) {
2460 		/* stop the rlc */
2461 		adev->gfx.rlc.funcs->stop(adev);
2462 
2463 		/* Disable MEC parsing/prefetching */
2464 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2465 
2466 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2467 		tmp |= grbm_soft_reset;
2468 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2469 		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2470 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2471 
2472 		udelay(50);
2473 
2474 		tmp &= ~grbm_soft_reset;
2475 		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2476 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2477 
2478 		/* Wait a little for things to settle down */
2479 		udelay(50);
2480 	}
2481 	return 0;
2482 }
2483 
2484 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2485 					  uint32_t vmid,
2486 					  uint32_t gds_base, uint32_t gds_size,
2487 					  uint32_t gws_base, uint32_t gws_size,
2488 					  uint32_t oa_base, uint32_t oa_size)
2489 {
2490 	struct amdgpu_device *adev = ring->adev;
2491 
2492 	/* GDS Base */
2493 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2494 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2495 				   gds_base);
2496 
2497 	/* GDS Size */
2498 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2499 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2500 				   gds_size);
2501 
2502 	/* GWS */
2503 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2504 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2505 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2506 
2507 	/* OA */
2508 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2509 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2510 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
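	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
	 * mask of oa_size bits starting at bit oa_base; e.g. oa_base = 2,
	 * oa_size = 3 yields 0b11100.
	 */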
2511 }
2512 
2513 static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
2514 {
2515 	struct amdgpu_device *adev = ip_block->adev;
2516 
2517 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2518 					  AMDGPU_MAX_COMPUTE_RINGS);
2519 	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2520 	gfx_v9_4_3_set_ring_funcs(adev);
2521 	gfx_v9_4_3_set_irq_funcs(adev);
2522 	gfx_v9_4_3_set_gds_init(adev);
2523 	gfx_v9_4_3_set_rlc_funcs(adev);
2524 
2525 	/* init rlcg reg access ctrl */
2526 	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2527 
2528 	return gfx_v9_4_3_init_microcode(adev);
2529 }
2530 
2531 static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
2532 {
2533 	struct amdgpu_device *adev = ip_block->adev;
2534 	int r;
2535 
2536 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2537 	if (r)
2538 		return r;
2539 
2540 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2541 	if (r)
2542 		return r;
2543 
2544 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
2545 	if (r)
2546 		return r;
2547 
2548 	if (adev->gfx.ras &&
2549 	    adev->gfx.ras->enable_watchdog_timer)
2550 		adev->gfx.ras->enable_watchdog_timer(adev);
2551 
2552 	return 0;
2553 }
2554 
2555 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2556 					    bool enable, int xcc_id)
2557 {
2558 	uint32_t def, data;
2559 
2560 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2561 		return;
2562 
2563 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2564 				  regRLC_CGTT_MGCG_OVERRIDE);
2565 
2566 	if (enable)
2567 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2568 	else
2569 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2570 
2571 	if (def != data)
2572 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2573 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2575 }
2576 
2577 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2578 						bool enable, int xcc_id)
2579 {
2580 	uint32_t def, data;
2581 
2582 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2583 		return;
2584 
2585 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2586 				  regRLC_CGTT_MGCG_OVERRIDE);
2587 
2588 	if (enable)
2589 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2590 	else
2591 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2592 
2593 	if (def != data)
2594 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2595 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2596 }
2597 
2598 static void
2599 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2600 						bool enable, int xcc_id)
2601 {
2602 	uint32_t data, def;
2603 
2604 	/* It is disabled by HW by default */
2605 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2606 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2607 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2608 
2609 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2610 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2611 			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2612 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2613 
2614 		if (def != data)
2615 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2616 
2617 		/* MGLS is a global flag to control all MGLS in GFX */
2618 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2619 			/* 2 - RLC memory Light sleep */
2620 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2621 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2622 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2623 				if (def != data)
2624 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2625 			}
2626 			/* 3 - CP memory Light sleep */
2627 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2628 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2629 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2630 				if (def != data)
2631 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2632 			}
2633 		}
2634 	} else {
2635 		/* 1 - MGCG_OVERRIDE */
2636 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2637 
2638 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2639 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2640 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2641 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2642 
2643 		if (def != data)
2644 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2645 
2646 		/* 2 - disable MGLS in RLC */
2647 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2648 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2649 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2650 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2651 		}
2652 
2653 		/* 3 - disable MGLS in CP */
2654 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2655 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2656 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2657 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2658 		}
2659 	}
2661 }
2662 
2663 static void
2664 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2665 						bool enable, int xcc_id)
2666 {
2667 	uint32_t def, data;
2668 
2669 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2670 
2671 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2672 		/* unset CGCG override */
2673 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2674 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2675 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2676 		else
2677 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2678 		/* update CGCG and CGLS override bits */
2679 		if (def != data)
2680 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2681 
2682 		/* CGCG Hysteresis: 400us */
2683 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2684 
2685 		data = (0x2710
2686 			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2687 		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
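		/* 0x2710 is 10000 refclk cycles; at the typical 25MHz SOC
		 * refclk that matches the 400us hysteresis noted above (the
		 * refclk rate is an assumption, not taken from this file).
		 */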
2688 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2689 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2690 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2691 		if (def != data)
2692 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2693 
2694 		/* set IDLE_POLL_COUNT (0x33450100) */
2695 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2696 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2697 			(0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2698 		if (def != data)
2699 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2700 	} else {
2701 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2702 		/* reset CGCG/CGLS bits */
2703 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2704 		/* disable cgcg and cgls in FSM */
2705 		if (def != data)
2706 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2707 	}
2709 }
2710 
2711 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2712 						  bool enable, int xcc_id)
2713 {
2714 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2715 
2716 	if (enable) {
2717 		/* FGCG */
2718 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2719 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2720 
2721 		/* CGCG/CGLS should be enabled after MGCG/MGLS
2722 		 * ===  MGCG + MGLS ===
2723 		 */
2724 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2725 								xcc_id);
2726 		/* ===  CGCG + CGLS === */
2727 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2728 								xcc_id);
2729 	} else {
2730 		/* CGCG/CGLS should be disabled before MGCG/MGLS
2731 		 * ===  CGCG + CGLS ===
2732 		 */
2733 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2734 								xcc_id);
2735 		/* ===  MGCG + MGLS === */
2736 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2737 								xcc_id);
2738 
2739 		/* FGCG */
2740 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2741 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2742 	}
2743 
2744 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2745 
2746 	return 0;
2747 }
2748 
2749 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2750 	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2751 	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2752 	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2753 	.init = gfx_v9_4_3_rlc_init,
2754 	.resume = gfx_v9_4_3_rlc_resume,
2755 	.stop = gfx_v9_4_3_rlc_stop,
2756 	.reset = gfx_v9_4_3_rlc_reset,
2757 	.start = gfx_v9_4_3_rlc_start,
2758 	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2759 	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2760 };
2761 
2762 static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
2763 					  enum amd_powergating_state state)
2764 {
2765 	return 0;
2766 }
2767 
2768 static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2769 					  enum amd_clockgating_state state)
2770 {
2771 	struct amdgpu_device *adev = ip_block->adev;
2772 	int i, num_xcc;
2773 
2774 	if (amdgpu_sriov_vf(adev))
2775 		return 0;
2776 
2777 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2778 	for (i = 0; i < num_xcc; i++)
2779 		gfx_v9_4_3_xcc_update_gfx_clock_gating(
2780 			adev, state == AMD_CG_STATE_GATE, i);
2781 
2782 	return 0;
2783 }
2784 
2785 static void gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
2786 {
2787 	struct amdgpu_device *adev = ip_block->adev;
2788 	int data;
2789 
2790 	if (amdgpu_sriov_vf(adev))
2791 		*flags = 0;
2792 
2793 	/* AMD_CG_SUPPORT_GFX_MGCG */
2794 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2795 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2796 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
2797 
2798 	/* AMD_CG_SUPPORT_GFX_CGCG */
2799 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2800 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2801 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
2802 
2803 	/* AMD_CG_SUPPORT_GFX_CGLS */
2804 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2805 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
2806 
2807 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
2808 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2809 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2810 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2811 
2812 	/* AMD_CG_SUPPORT_GFX_CP_LS */
2813 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2814 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2815 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2816 }
2817 
2818 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2819 {
2820 	struct amdgpu_device *adev = ring->adev;
2821 	u32 ref_and_mask, reg_mem_engine;
2822 
2823 	if (!adev->gfx.funcs->get_hdp_flush_mask) {
2824 		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
2825 		return;
2826 	}
2827 
2828 	adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
2829 	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2830 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2831 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2832 			      ref_and_mask, ref_and_mask, 0x20);
2833 }
2834 
2835 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2836 					  struct amdgpu_job *job,
2837 					  struct amdgpu_ib *ib,
2838 					  uint32_t flags)
2839 {
2840 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2841 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
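	/* The IB control dword packs the IB length in dwords into the low
	 * bits and the VMID into bits [27:24], with INDIRECT_BUFFER_VALID
	 * set so the CP treats the payload as a real indirect buffer.
	 */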
2842 
2843 	/* Currently, there is a high possibility to get wave ID mismatch
2844 	 * between ME and GDS, leading to a hw deadlock, because ME generates
2845 	 * different wave IDs than the GDS expects. This situation happens
2846 	 * randomly when at least 5 compute pipes use GDS ordered append.
2847 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2848 	 * Those are probably bugs somewhere else in the kernel driver.
2849 	 *
2850 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2851 	 * GDS to 0 for this ring (me/pipe).
2852 	 */
2853 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2854 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2855 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2856 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2857 	}
2858 
2859 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2860 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2861 	amdgpu_ring_write(ring,
2862 #ifdef __BIG_ENDIAN
2863 				(2 << 0) |
2864 #endif
2865 				lower_32_bits(ib->gpu_addr));
2866 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2867 	amdgpu_ring_write(ring, control);
2868 }
2869 
2870 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2871 				     u64 seq, unsigned flags)
2872 {
2873 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2874 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2875 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2876 
2877 	/* RELEASE_MEM - flush caches, send int */
2878 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2879 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2880 					       EOP_TC_NC_ACTION_EN) :
2881 					      (EOP_TCL1_ACTION_EN |
2882 					       EOP_TC_ACTION_EN |
2883 					       EOP_TC_WB_ACTION_EN |
2884 					       EOP_TC_MD_ACTION_EN)) |
2885 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2886 				 EVENT_INDEX(5)));
2887 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
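	/* DATA_SEL: 1 = write only the low 32 bits of seq, 2 = write all 64
	 * bits; INT_SEL: 2 = raise the interrupt once the write is confirmed.
	 */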
2888 
2889 	/*
2890 	 * the address should be qword aligned for a 64-bit write, and dword
2891 	 * aligned when only the low 32 bits are sent (high bits discarded)
2892 	 */
2893 	if (write64bit)
2894 		BUG_ON(addr & 0x7);
2895 	else
2896 		BUG_ON(addr & 0x3);
2897 	amdgpu_ring_write(ring, lower_32_bits(addr));
2898 	amdgpu_ring_write(ring, upper_32_bits(addr));
2899 	amdgpu_ring_write(ring, lower_32_bits(seq));
2900 	amdgpu_ring_write(ring, upper_32_bits(seq));
2901 	amdgpu_ring_write(ring, 0);
2902 }
2903 
2904 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2905 {
2906 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2907 	uint32_t seq = ring->fence_drv.sync_seq;
2908 	uint64_t addr = ring->fence_drv.gpu_addr;
2909 
2910 	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2911 			      lower_32_bits(addr), upper_32_bits(addr),
2912 			      seq, 0xffffffff, 4);
2913 }
2914 
2915 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2916 					unsigned vmid, uint64_t pd_addr)
2917 {
2918 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2919 }
2920 
2921 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2922 {
2923 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2924 }
2925 
2926 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2927 {
2928 	u64 wptr;
2929 
2930 	/* XXX check if swapping is necessary on BE */
2931 	if (ring->use_doorbell)
2932 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2933 	else
2934 		BUG();
2935 	return wptr;
2936 }
2937 
2938 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2939 {
2940 	struct amdgpu_device *adev = ring->adev;
2941 
2942 	/* XXX check if swapping is necessary on BE */
2943 	if (ring->use_doorbell) {
2944 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2945 		WDOORBELL64(ring->doorbell_index, ring->wptr);
2946 	} else {
2947 		BUG(); /* only DOORBELL method supported on gfx9 now */
2948 	}
2949 }
2950 
2951 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2952 					 u64 seq, unsigned int flags)
2953 {
2954 	struct amdgpu_device *adev = ring->adev;
2955 
2956 	/* we only allocate 32 bits for each seq WB address */
2957 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2958 
2959 	/* write fence seq to the "addr" */
2960 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2961 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2962 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2963 	amdgpu_ring_write(ring, lower_32_bits(addr));
2964 	amdgpu_ring_write(ring, upper_32_bits(addr));
2965 	amdgpu_ring_write(ring, lower_32_bits(seq));
2966 
2967 	if (flags & AMDGPU_FENCE_FLAG_INT) {
2968 		/* set register to trigger INT */
2969 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2970 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2971 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2972 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2973 		amdgpu_ring_write(ring, 0);
2974 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2975 	}
2976 }
2977 
2978 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2979 				    uint32_t reg_val_offs)
2980 {
2981 	struct amdgpu_device *adev = ring->adev;
2982 
2983 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
2984 
2985 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2986 	amdgpu_ring_write(ring, 0 |	/* src: register*/
2987 				(5 << 8) |	/* dst: memory */
2988 				(1 << 20));	/* write confirm */
2989 	amdgpu_ring_write(ring, reg);
2990 	amdgpu_ring_write(ring, 0);
2991 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
2992 				reg_val_offs * 4));
2993 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
2994 				reg_val_offs * 4));
2995 }
2996 
2997 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
2998 				    uint32_t val)
2999 {
3000 	uint32_t cmd = 0;
3001 
3002 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
3003 
3004 	switch (ring->funcs->type) {
3005 	case AMDGPU_RING_TYPE_GFX:
3006 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
3007 		break;
3008 	case AMDGPU_RING_TYPE_KIQ:
3009 		cmd = (1 << 16); /* no inc addr */
3010 		break;
3011 	default:
3012 		cmd = WR_CONFIRM;
3013 		break;
3014 	}
3015 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3016 	amdgpu_ring_write(ring, cmd);
3017 	amdgpu_ring_write(ring, reg);
3018 	amdgpu_ring_write(ring, 0);
3019 	amdgpu_ring_write(ring, val);
3020 }
3021 
3022 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3023 					uint32_t val, uint32_t mask)
3024 {
3025 	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3026 }
3027 
3028 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3029 						  uint32_t reg0, uint32_t reg1,
3030 						  uint32_t ref, uint32_t mask)
3031 {
3032 	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
3033 						   ref, mask);
3034 }
3035 
3036 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3037 					  unsigned vmid)
3038 {
3039 	struct amdgpu_device *adev = ring->adev;
3040 	uint32_t value = 0;
3041 
3042 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3043 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3044 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3045 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3046 	amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3047 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3048 	amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3049 }
3050 
3051 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3052 	struct amdgpu_device *adev, int me, int pipe,
3053 	enum amdgpu_interrupt_state state, int xcc_id)
3054 {
3055 	u32 mec_int_cntl, mec_int_cntl_reg;
3056 
3057 	/*
3058 	 * amdgpu controls only the first MEC. That's why this function only
3059 	 * handles the setting of interrupts for this specific MEC. All other
3060 	 * pipes' interrupts are set by amdkfd.
3061 	 */
3062 
3063 	if (me == 1) {
3064 		switch (pipe) {
3065 		case 0:
3066 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3067 			break;
3068 		case 1:
3069 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3070 			break;
3071 		case 2:
3072 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3073 			break;
3074 		case 3:
3075 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3076 			break;
3077 		default:
3078 			DRM_DEBUG("invalid pipe %d\n", pipe);
3079 			return;
3080 		}
3081 	} else {
3082 		DRM_DEBUG("invalid me %d\n", me);
3083 		return;
3084 	}
3085 
3086 	switch (state) {
3087 	case AMDGPU_IRQ_STATE_DISABLE:
3088 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3089 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3090 					     TIME_STAMP_INT_ENABLE, 0);
3091 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3092 		break;
3093 	case AMDGPU_IRQ_STATE_ENABLE:
3094 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3095 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3096 					     TIME_STAMP_INT_ENABLE, 1);
3097 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3098 		break;
3099 	default:
3100 		break;
3101 	}
3102 }
3103 
3104 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3105 				     int xcc_id, int me, int pipe)
3106 {
3107 	/*
3108 	 * amdgpu controls only the first MEC. That's why this function only
3109 	 * handles the setting of interrupts for this specific MEC. All other
3110 	 * pipes' interrupts are set by amdkfd.
3111 	 */
3112 	if (me != 1)
3113 		return 0;
3114 
3115 	switch (pipe) {
3116 	case 0:
3117 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3118 	case 1:
3119 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3120 	case 2:
3121 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3122 	case 3:
3123 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3124 	default:
3125 		return 0;
3126 	}
3127 }
3128 
3129 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3130 					     struct amdgpu_irq_src *source,
3131 					     unsigned type,
3132 					     enum amdgpu_interrupt_state state)
3133 {
3134 	u32 mec_int_cntl_reg, mec_int_cntl;
3135 	int i, j, k, num_xcc;
3136 
3137 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3138 	switch (state) {
3139 	case AMDGPU_IRQ_STATE_DISABLE:
3140 	case AMDGPU_IRQ_STATE_ENABLE:
3141 		for (i = 0; i < num_xcc; i++) {
3142 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3143 					      PRIV_REG_INT_ENABLE,
3144 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3145 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3146 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3147 					/* MECs start at 1 */
3148 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3149 
3150 					if (mec_int_cntl_reg) {
3151 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3152 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3153 									     PRIV_REG_INT_ENABLE,
3154 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3155 									     1 : 0);
3156 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3157 					}
3158 				}
3159 			}
3160 		}
3161 		break;
3162 	default:
3163 		break;
3164 	}
3165 
3166 	return 0;
3167 }
3168 
3169 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3170 					     struct amdgpu_irq_src *source,
3171 					     unsigned type,
3172 					     enum amdgpu_interrupt_state state)
3173 {
3174 	u32 mec_int_cntl_reg, mec_int_cntl;
3175 	int i, j, k, num_xcc;
3176 
3177 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3178 	switch (state) {
3179 	case AMDGPU_IRQ_STATE_DISABLE:
3180 	case AMDGPU_IRQ_STATE_ENABLE:
3181 		for (i = 0; i < num_xcc; i++) {
3182 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3183 					      OPCODE_ERROR_INT_ENABLE,
3184 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3185 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3186 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3187 					/* MECs start at 1 */
3188 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3189 
3190 					if (mec_int_cntl_reg) {
3191 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3192 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3193 									     OPCODE_ERROR_INT_ENABLE,
3194 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3195 									     1 : 0);
3196 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3197 					}
3198 				}
3199 			}
3200 		}
3201 		break;
3202 	default:
3203 		break;
3204 	}
3205 
3206 	return 0;
3207 }
3208 
3209 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3210 					      struct amdgpu_irq_src *source,
3211 					      unsigned type,
3212 					      enum amdgpu_interrupt_state state)
3213 {
3214 	int i, num_xcc;
3215 
3216 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3217 	switch (state) {
3218 	case AMDGPU_IRQ_STATE_DISABLE:
3219 	case AMDGPU_IRQ_STATE_ENABLE:
3220 		for (i = 0; i < num_xcc; i++)
3221 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3222 				PRIV_INSTR_INT_ENABLE,
3223 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3224 		break;
3225 	default:
3226 		break;
3227 	}
3228 
3229 	return 0;
3230 }
3231 
3232 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3233 					    struct amdgpu_irq_src *src,
3234 					    unsigned type,
3235 					    enum amdgpu_interrupt_state state)
3236 {
3237 	int i, num_xcc;
3238 
3239 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3240 	for (i = 0; i < num_xcc; i++) {
3241 		switch (type) {
3242 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3243 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3244 				adev, 1, 0, state, i);
3245 			break;
3246 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3247 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3248 				adev, 1, 1, state, i);
3249 			break;
3250 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3251 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3252 				adev, 1, 2, state, i);
3253 			break;
3254 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3255 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3256 				adev, 1, 3, state, i);
3257 			break;
3258 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3259 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3260 				adev, 2, 0, state, i);
3261 			break;
3262 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3263 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3264 				adev, 2, 1, state, i);
3265 			break;
3266 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3267 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3268 				adev, 2, 2, state, i);
3269 			break;
3270 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3271 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3272 				adev, 2, 3, state, i);
3273 			break;
3274 		default:
3275 			break;
3276 		}
3277 	}
3278 
3279 	return 0;
3280 }
3281 
3282 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
3283 			    struct amdgpu_irq_src *source,
3284 			    struct amdgpu_iv_entry *entry)
3285 {
3286 	int i, xcc_id;
3287 	u8 me_id, pipe_id, queue_id;
3288 	struct amdgpu_ring *ring;
3289 
3290 	DRM_DEBUG("IH: CP EOP\n");
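	/* ring_id encodes the source: bits [1:0] = pipe, [3:2] = me, [6:4] = queue */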
3291 	me_id = (entry->ring_id & 0x0c) >> 2;
3292 	pipe_id = (entry->ring_id & 0x03) >> 0;
3293 	queue_id = (entry->ring_id & 0x70) >> 4;
3294 
3295 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3296 
3297 	if (xcc_id == -EINVAL)
3298 		return -EINVAL;
3299 
3300 	switch (me_id) {
3301 	case 0:
3302 	case 1:
3303 	case 2:
3304 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3305 			ring = &adev->gfx.compute_ring
3306 					[i +
3307 					 xcc_id * adev->gfx.num_compute_rings];
3308 			/* Per-queue interrupts have been supported for the MEC since VI,
3309 			 * but they can only be enabled/disabled per pipe, not per queue.
3310 			 */
3311 
3312 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3313 				amdgpu_fence_process(ring);
3314 		}
3315 		break;
3316 	}
3317 	return 0;
3318 }
3319 
3320 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3321 			   struct amdgpu_iv_entry *entry)
3322 {
3323 	u8 me_id, pipe_id, queue_id;
3324 	struct amdgpu_ring *ring;
3325 	int i, xcc_id;
3326 
3327 	me_id = (entry->ring_id & 0x0c) >> 2;
3328 	pipe_id = (entry->ring_id & 0x03) >> 0;
3329 	queue_id = (entry->ring_id & 0x70) >> 4;
3330 
3331 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3332 
3333 	if (xcc_id == -EINVAL)
3334 		return;
3335 
3336 	switch (me_id) {
3337 	case 0:
3338 	case 1:
3339 	case 2:
3340 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3341 			ring = &adev->gfx.compute_ring
3342 					[i +
3343 					 xcc_id * adev->gfx.num_compute_rings];
3344 			if (ring->me == me_id && ring->pipe == pipe_id &&
3345 			    ring->queue == queue_id)
3346 				drm_sched_fault(&ring->sched);
3347 		}
3348 		break;
3349 	}
3350 }
3351 
3352 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3353 				 struct amdgpu_irq_src *source,
3354 				 struct amdgpu_iv_entry *entry)
3355 {
3356 	DRM_ERROR("Illegal register access in command stream\n");
3357 	gfx_v9_4_3_fault(adev, entry);
3358 	return 0;
3359 }
3360 
3361 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
3362 				 struct amdgpu_irq_src *source,
3363 				 struct amdgpu_iv_entry *entry)
3364 {
3365 	DRM_ERROR("Illegal opcode in command stream\n");
3366 	gfx_v9_4_3_fault(adev, entry);
3367 	return 0;
3368 }
3369 
3370 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3371 				  struct amdgpu_irq_src *source,
3372 				  struct amdgpu_iv_entry *entry)
3373 {
3374 	DRM_ERROR("Illegal instruction in command stream\n");
3375 	gfx_v9_4_3_fault(adev, entry);
3376 	return 0;
3377 }
3378 
3379 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3380 {
3381 	const unsigned int cp_coher_cntl =
3382 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3383 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3384 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3385 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3386 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
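	/*
	 * The ACTION_ENA flags above invalidate the shader instruction
	 * (ICACHE) and scalar (KCACHE) caches and the TC/TCL1 caches, and
	 * request a TC writeback, over the full range programmed below.
	 */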
3387 
3388 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3389 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3390 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3391 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3392 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3393 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3394 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3395 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3396 }
3397 
3398 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3399 					uint32_t pipe, bool enable)
3400 {
3401 	struct amdgpu_device *adev = ring->adev;
3402 	uint32_t val;
3403 	uint32_t wcl_cs_reg;
3404 
3405 	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */
3406 	val = enable ? 0x1 : 0x7f;	/* 0x7f is the 7-bit field maximum, i.e. no limit */
3407 
3408 	switch (pipe) {
3409 	case 0:
3410 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3411 		break;
3412 	case 1:
3413 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3414 		break;
3415 	case 2:
3416 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3417 		break;
3418 	case 3:
3419 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3420 		break;
3421 	default:
3422 		DRM_DEBUG("invalid pipe %d\n", pipe);
3423 		return;
3424 	}
3425 
3426 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3427 }
3428 
3429 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3430 {
3431 	struct amdgpu_device *adev = ring->adev;
3432 	uint32_t val;
3433 	int i;
3434 
3435 	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
3436 	 * limit the number of gfx waves. Setting it to 0x1f (5 bits set)
3437 	 * ensures gfx gets only around 25% of GPU resources.
3438 	 */
3439 	val = enable ? 0x1f : 0x07ffffff;
3440 	amdgpu_ring_emit_wreg(ring,
3441 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3442 			      val);
3443 
3444 	/* Restrict waves for normal/low priority compute queues as well
3445 	 * to get best QoS for high priority compute jobs.
3446 	 *
3447 	 * amdgpu controls only 1st ME(0-3 CS pipes).
3448 	 */
3449 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3450 		if (i != ring->pipe)
3451 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3452 
3453 	}
3454 }
3455 
3456 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
3457 				uint32_t pipe, uint32_t queue,
3458 				uint32_t xcc_id)
3459 {
3460 	int i, r;
3461 	/* make sure dequeue is complete */
3462 	gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
3463 	mutex_lock(&adev->srbm_mutex);
3464 	soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
3465 	for (i = 0; i < adev->usec_timeout; i++) {
3466 		if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
3467 			break;
3468 		udelay(1);
3469 	}
3470 	if (i >= adev->usec_timeout)
3471 		r = -ETIMEDOUT;
3472 	else
3473 		r = 0;
3474 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
3475 	mutex_unlock(&adev->srbm_mutex);
3476 	gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
3477 
3478 	return r;
3479 }
3481 
3482 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
3483 {
3484 	if (adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
3485 		return true;
3486 
3487 	dev_warn_once(adev->dev, "Please use the latest MEC firmware to check whether pipe reset is supported\n");
3488 
3489 	return false;
3490 }
3491 
3492 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
3493 {
3494 	struct amdgpu_device *adev = ring->adev;
3495 	uint32_t reset_pipe, clean_pipe;
3496 	int r;
3497 
3498 	if (!gfx_v9_4_3_pipe_reset_support(adev))
3499 		return -EINVAL;
3500 
3501 	gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
3502 	mutex_lock(&adev->srbm_mutex);
3503 
3504 	reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
3505 	clean_pipe = reset_pipe;
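	/*
	 * reset_pipe will get the pipe's RESET bit set; clean_pipe keeps the
	 * original CP_MEC_CNTL value so the bit is deasserted right after.
	 */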
3506 
3507 	if (ring->me == 1) {
3508 		switch (ring->pipe) {
3509 		case 0:
3510 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3511 						   MEC_ME1_PIPE0_RESET, 1);
3512 			break;
3513 		case 1:
3514 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3515 						   MEC_ME1_PIPE1_RESET, 1);
3516 			break;
3517 		case 2:
3518 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3519 						   MEC_ME1_PIPE2_RESET, 1);
3520 			break;
3521 		case 3:
3522 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3523 						   MEC_ME1_PIPE3_RESET, 1);
3524 			break;
3525 		default:
3526 			break;
3527 		}
3528 	} else {
3529 		if (ring->pipe)
3530 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3531 						   MEC_ME2_PIPE1_RESET, 1);
3532 		else
3533 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3534 						   MEC_ME2_PIPE0_RESET, 1);
3535 	}
3536 
3537 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
3538 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
3539 	mutex_unlock(&adev->srbm_mutex);
3540 	gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
3541 
3542 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3543 	return r;
3544 }
3545 
3546 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3547 				unsigned int vmid,
3548 				struct amdgpu_fence *timedout_fence)
3549 {
3550 	struct amdgpu_device *adev = ring->adev;
3551 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3552 	struct amdgpu_ring *kiq_ring = &kiq->ring;
3553 	int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE;
3554 	unsigned long flags;
3555 	int r;
3556 
3557 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3558 		return -EINVAL;
3559 
3560 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
3561 
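	/* First try a per-queue reset: have the KIQ unmap the hung queue. */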
3562 	spin_lock_irqsave(&kiq->ring_lock, flags);
3563 
3564 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3565 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3566 		return -ENOMEM;
3567 	}
3568 
3569 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3570 				   0, 0);
3571 	amdgpu_ring_commit(kiq_ring);
3572 
3573 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3574 
3575 	r = amdgpu_ring_test_ring(kiq_ring);
3576 	if (r) {
3577 		dev_err(adev->dev, "kiq ring test failed after queue reset on ring %s\n",
3578 				ring->name);
3579 		goto pipe_reset;
3580 	}
3581 
3582 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3583 	if (r)
3584 		dev_err(adev->dev, "failed to wait for hqd deactivation, will try pipe reset\n");
3585 
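	/* Queue-level reset failed; fall back to a per-pipe reset if supported. */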
3586 pipe_reset:
3587 	if (r) {
3588 		if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
3589 			return -EOPNOTSUPP;
3590 		r = gfx_v9_4_3_reset_hw_pipe(ring);
3591 		reset_mode = AMDGPU_RESET_TYPE_PER_PIPE;
3592 		dev_info(adev->dev, "ring %s pipe reset %s\n", ring->name,
3593 				r ? "failed" : "succeeded");
3594 		if (r)
3595 			return r;
3596 	}
3597 
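	/* The queue is quiesced; reinitialize its HQD and remap it via the KIQ. */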
3598 	gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
3599 
3600 	spin_lock_irqsave(&kiq->ring_lock, flags);
3601 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
3602 	if (r) {
3603 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3604 		return -ENOMEM;
3605 	}
3606 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
3607 	amdgpu_ring_commit(kiq_ring);
3608 	r = amdgpu_ring_test_ring(kiq_ring);
3609 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3610 	if (r) {
3611 		if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
3612 			goto pipe_reset;
3613 
3614 		dev_err(adev->dev, "failed to remap queue\n");
3615 		return r;
3616 	}
3617 
3618 	if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
3619 		r = amdgpu_ring_test_ring(ring);
3620 		if (r)
3621 			goto pipe_reset;
3622 	}
3623 
3625 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
3626 }
3627 
3628 enum amdgpu_gfx_cp_ras_mem_id {
3629 	AMDGPU_GFX_CP_MEM1 = 1,
3630 	AMDGPU_GFX_CP_MEM2,
3631 	AMDGPU_GFX_CP_MEM3,
3632 	AMDGPU_GFX_CP_MEM4,
3633 	AMDGPU_GFX_CP_MEM5,
3634 };
3635 
3636 enum amdgpu_gfx_gcea_ras_mem_id {
3637 	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3638 	AMDGPU_GFX_GCEA_IORD_CMDMEM,
3639 	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3640 	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3641 	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3642 	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3643 	AMDGPU_GFX_GCEA_MAM_DMEM0,
3644 	AMDGPU_GFX_GCEA_MAM_DMEM1,
3645 	AMDGPU_GFX_GCEA_MAM_DMEM2,
3646 	AMDGPU_GFX_GCEA_MAM_DMEM3,
3647 	AMDGPU_GFX_GCEA_MAM_AMEM0,
3648 	AMDGPU_GFX_GCEA_MAM_AMEM1,
3649 	AMDGPU_GFX_GCEA_MAM_AMEM2,
3650 	AMDGPU_GFX_GCEA_MAM_AMEM3,
3651 	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3652 	AMDGPU_GFX_GCEA_WRET_TAGMEM,
3653 	AMDGPU_GFX_GCEA_RRET_TAGMEM,
3654 	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3655 	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3656 	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3657 };
3658 
3659 enum amdgpu_gfx_gc_cane_ras_mem_id {
3660 	AMDGPU_GFX_GC_CANE_MEM0 = 0,
3661 };
3662 
3663 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3664 	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3665 };
3666 
3667 enum amdgpu_gfx_gds_ras_mem_id {
3668 	AMDGPU_GFX_GDS_MEM0 = 0,
3669 };
3670 
3671 enum amdgpu_gfx_lds_ras_mem_id {
3672 	AMDGPU_GFX_LDS_BANK0 = 0,
3673 	AMDGPU_GFX_LDS_BANK1,
3674 	AMDGPU_GFX_LDS_BANK2,
3675 	AMDGPU_GFX_LDS_BANK3,
3676 	AMDGPU_GFX_LDS_BANK4,
3677 	AMDGPU_GFX_LDS_BANK5,
3678 	AMDGPU_GFX_LDS_BANK6,
3679 	AMDGPU_GFX_LDS_BANK7,
3680 	AMDGPU_GFX_LDS_BANK8,
3681 	AMDGPU_GFX_LDS_BANK9,
3682 	AMDGPU_GFX_LDS_BANK10,
3683 	AMDGPU_GFX_LDS_BANK11,
3684 	AMDGPU_GFX_LDS_BANK12,
3685 	AMDGPU_GFX_LDS_BANK13,
3686 	AMDGPU_GFX_LDS_BANK14,
3687 	AMDGPU_GFX_LDS_BANK15,
3688 	AMDGPU_GFX_LDS_BANK16,
3689 	AMDGPU_GFX_LDS_BANK17,
3690 	AMDGPU_GFX_LDS_BANK18,
3691 	AMDGPU_GFX_LDS_BANK19,
3692 	AMDGPU_GFX_LDS_BANK20,
3693 	AMDGPU_GFX_LDS_BANK21,
3694 	AMDGPU_GFX_LDS_BANK22,
3695 	AMDGPU_GFX_LDS_BANK23,
3696 	AMDGPU_GFX_LDS_BANK24,
3697 	AMDGPU_GFX_LDS_BANK25,
3698 	AMDGPU_GFX_LDS_BANK26,
3699 	AMDGPU_GFX_LDS_BANK27,
3700 	AMDGPU_GFX_LDS_BANK28,
3701 	AMDGPU_GFX_LDS_BANK29,
3702 	AMDGPU_GFX_LDS_BANK30,
3703 	AMDGPU_GFX_LDS_BANK31,
3704 	AMDGPU_GFX_LDS_SP_BUFFER_A,
3705 	AMDGPU_GFX_LDS_SP_BUFFER_B,
3706 };
3707 
3708 enum amdgpu_gfx_rlc_ras_mem_id {
3709 	AMDGPU_GFX_RLC_GPMF32 = 1,
3710 	AMDGPU_GFX_RLC_RLCVF32,
3711 	AMDGPU_GFX_RLC_SCRATCH,
3712 	AMDGPU_GFX_RLC_SRM_ARAM,
3713 	AMDGPU_GFX_RLC_SRM_DRAM,
3714 	AMDGPU_GFX_RLC_TCTAG,
3715 	AMDGPU_GFX_RLC_SPM_SE,
3716 	AMDGPU_GFX_RLC_SPM_GRBMT,
3717 };
3718 
3719 enum amdgpu_gfx_sp_ras_mem_id {
3720 	AMDGPU_GFX_SP_SIMDID0 = 0,
3721 };
3722 
3723 enum amdgpu_gfx_spi_ras_mem_id {
3724 	AMDGPU_GFX_SPI_MEM0 = 0,
3725 	AMDGPU_GFX_SPI_MEM1,
3726 	AMDGPU_GFX_SPI_MEM2,
3727 	AMDGPU_GFX_SPI_MEM3,
3728 };
3729 
3730 enum amdgpu_gfx_sqc_ras_mem_id {
3731 	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3732 	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3733 	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3734 	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3735 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3736 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3737 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3738 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3739 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3740 	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3741 	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3742 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3743 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3744 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3745 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3746 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3747 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3748 	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3749 	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3750 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3751 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3752 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3753 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3754 };
3755 
3756 enum amdgpu_gfx_sq_ras_mem_id {
3757 	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3758 	AMDGPU_GFX_SQ_SGPR_MEM1,
3759 	AMDGPU_GFX_SQ_SGPR_MEM2,
3760 	AMDGPU_GFX_SQ_SGPR_MEM3,
3761 };
3762 
3763 enum amdgpu_gfx_ta_ras_mem_id {
3764 	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3765 	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3766 	AMDGPU_GFX_TA_FS_CFIFO_RAM,
3767 	AMDGPU_GFX_TA_FSX_LFIFO,
3768 	AMDGPU_GFX_TA_FS_DFIFO_RAM,
3769 };
3770 
3771 enum amdgpu_gfx_tcc_ras_mem_id {
3772 	AMDGPU_GFX_TCC_MEM1 = 1,
3773 };
3774 
3775 enum amdgpu_gfx_tca_ras_mem_id {
3776 	AMDGPU_GFX_TCA_MEM1 = 1,
3777 };
3778 
3779 enum amdgpu_gfx_tci_ras_mem_id {
3780 	AMDGPU_GFX_TCIW_MEM = 1,
3781 };
3782 
3783 enum amdgpu_gfx_tcp_ras_mem_id {
3784 	AMDGPU_GFX_TCP_LFIFO0 = 1,
3785 	AMDGPU_GFX_TCP_SET0BANK0_RAM,
3786 	AMDGPU_GFX_TCP_SET0BANK1_RAM,
3787 	AMDGPU_GFX_TCP_SET0BANK2_RAM,
3788 	AMDGPU_GFX_TCP_SET0BANK3_RAM,
3789 	AMDGPU_GFX_TCP_SET1BANK0_RAM,
3790 	AMDGPU_GFX_TCP_SET1BANK1_RAM,
3791 	AMDGPU_GFX_TCP_SET1BANK2_RAM,
3792 	AMDGPU_GFX_TCP_SET1BANK3_RAM,
3793 	AMDGPU_GFX_TCP_SET2BANK0_RAM,
3794 	AMDGPU_GFX_TCP_SET2BANK1_RAM,
3795 	AMDGPU_GFX_TCP_SET2BANK2_RAM,
3796 	AMDGPU_GFX_TCP_SET2BANK3_RAM,
3797 	AMDGPU_GFX_TCP_SET3BANK0_RAM,
3798 	AMDGPU_GFX_TCP_SET3BANK1_RAM,
3799 	AMDGPU_GFX_TCP_SET3BANK2_RAM,
3800 	AMDGPU_GFX_TCP_SET3BANK3_RAM,
3801 	AMDGPU_GFX_TCP_VM_FIFO,
3802 	AMDGPU_GFX_TCP_DB_TAGRAM0,
3803 	AMDGPU_GFX_TCP_DB_TAGRAM1,
3804 	AMDGPU_GFX_TCP_DB_TAGRAM2,
3805 	AMDGPU_GFX_TCP_DB_TAGRAM3,
3806 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3807 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3808 	AMDGPU_GFX_TCP_CMD_FIFO,
3809 };
3810 
3811 enum amdgpu_gfx_td_ras_mem_id {
3812 	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3813 	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3814 	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3815 };
3816 
3817 enum amdgpu_gfx_tcx_ras_mem_id {
3818 	AMDGPU_GFX_TCX_FIFOD0 = 0,
3819 	AMDGPU_GFX_TCX_FIFOD1,
3820 	AMDGPU_GFX_TCX_FIFOD2,
3821 	AMDGPU_GFX_TCX_FIFOD3,
3822 	AMDGPU_GFX_TCX_FIFOD4,
3823 	AMDGPU_GFX_TCX_FIFOD5,
3824 	AMDGPU_GFX_TCX_FIFOD6,
3825 	AMDGPU_GFX_TCX_FIFOD7,
3826 	AMDGPU_GFX_TCX_FIFOB0,
3827 	AMDGPU_GFX_TCX_FIFOB1,
3828 	AMDGPU_GFX_TCX_FIFOB2,
3829 	AMDGPU_GFX_TCX_FIFOB3,
3830 	AMDGPU_GFX_TCX_FIFOB4,
3831 	AMDGPU_GFX_TCX_FIFOB5,
3832 	AMDGPU_GFX_TCX_FIFOB6,
3833 	AMDGPU_GFX_TCX_FIFOB7,
3834 	AMDGPU_GFX_TCX_FIFOA0,
3835 	AMDGPU_GFX_TCX_FIFOA1,
3836 	AMDGPU_GFX_TCX_FIFOA2,
3837 	AMDGPU_GFX_TCX_FIFOA3,
3838 	AMDGPU_GFX_TCX_FIFOA4,
3839 	AMDGPU_GFX_TCX_FIFOA5,
3840 	AMDGPU_GFX_TCX_FIFOA6,
3841 	AMDGPU_GFX_TCX_FIFOA7,
3842 	AMDGPU_GFX_TCX_CFIFO0,
3843 	AMDGPU_GFX_TCX_CFIFO1,
3844 	AMDGPU_GFX_TCX_CFIFO2,
3845 	AMDGPU_GFX_TCX_CFIFO3,
3846 	AMDGPU_GFX_TCX_CFIFO4,
3847 	AMDGPU_GFX_TCX_CFIFO5,
3848 	AMDGPU_GFX_TCX_CFIFO6,
3849 	AMDGPU_GFX_TCX_CFIFO7,
3850 	AMDGPU_GFX_TCX_FIFO_ACKB0,
3851 	AMDGPU_GFX_TCX_FIFO_ACKB1,
3852 	AMDGPU_GFX_TCX_FIFO_ACKB2,
3853 	AMDGPU_GFX_TCX_FIFO_ACKB3,
3854 	AMDGPU_GFX_TCX_FIFO_ACKB4,
3855 	AMDGPU_GFX_TCX_FIFO_ACKB5,
3856 	AMDGPU_GFX_TCX_FIFO_ACKB6,
3857 	AMDGPU_GFX_TCX_FIFO_ACKB7,
3858 	AMDGPU_GFX_TCX_FIFO_ACKD0,
3859 	AMDGPU_GFX_TCX_FIFO_ACKD1,
3860 	AMDGPU_GFX_TCX_FIFO_ACKD2,
3861 	AMDGPU_GFX_TCX_FIFO_ACKD3,
3862 	AMDGPU_GFX_TCX_FIFO_ACKD4,
3863 	AMDGPU_GFX_TCX_FIFO_ACKD5,
3864 	AMDGPU_GFX_TCX_FIFO_ACKD6,
3865 	AMDGPU_GFX_TCX_FIFO_ACKD7,
3866 	AMDGPU_GFX_TCX_DST_FIFOA0,
3867 	AMDGPU_GFX_TCX_DST_FIFOA1,
3868 	AMDGPU_GFX_TCX_DST_FIFOA2,
3869 	AMDGPU_GFX_TCX_DST_FIFOA3,
3870 	AMDGPU_GFX_TCX_DST_FIFOA4,
3871 	AMDGPU_GFX_TCX_DST_FIFOA5,
3872 	AMDGPU_GFX_TCX_DST_FIFOA6,
3873 	AMDGPU_GFX_TCX_DST_FIFOA7,
3874 	AMDGPU_GFX_TCX_DST_FIFOB0,
3875 	AMDGPU_GFX_TCX_DST_FIFOB1,
3876 	AMDGPU_GFX_TCX_DST_FIFOB2,
3877 	AMDGPU_GFX_TCX_DST_FIFOB3,
3878 	AMDGPU_GFX_TCX_DST_FIFOB4,
3879 	AMDGPU_GFX_TCX_DST_FIFOB5,
3880 	AMDGPU_GFX_TCX_DST_FIFOB6,
3881 	AMDGPU_GFX_TCX_DST_FIFOB7,
3882 	AMDGPU_GFX_TCX_DST_FIFOD0,
3883 	AMDGPU_GFX_TCX_DST_FIFOD1,
3884 	AMDGPU_GFX_TCX_DST_FIFOD2,
3885 	AMDGPU_GFX_TCX_DST_FIFOD3,
3886 	AMDGPU_GFX_TCX_DST_FIFOD4,
3887 	AMDGPU_GFX_TCX_DST_FIFOD5,
3888 	AMDGPU_GFX_TCX_DST_FIFOD6,
3889 	AMDGPU_GFX_TCX_DST_FIFOD7,
3890 	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3891 	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3892 	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3893 	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3894 	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3895 	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3896 	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3897 	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3898 	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3899 	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3900 	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3901 	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3902 	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3903 	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3904 	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3905 	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3906 };
3907 
3908 enum amdgpu_gfx_atc_l2_ras_mem_id {
3909 	AMDGPU_GFX_ATC_L2_MEM0 = 0,
3910 };
3911 
3912 enum amdgpu_gfx_utcl2_ras_mem_id {
3913 	AMDGPU_GFX_UTCL2_MEM0 = 0,
3914 };
3915 
3916 enum amdgpu_gfx_vml2_ras_mem_id {
3917 	AMDGPU_GFX_VML2_MEM0 = 0,
3918 };
3919 
3920 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3921 	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3922 };
3923 
3924 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3925 	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3926 	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3927 	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3928 	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3929 	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3930 };
3931 
3932 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3933 	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3934 	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3935 	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3936 	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3937 	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3938 	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3939 	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3940 	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3941 	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3942 	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3943 	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3944 	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3945 	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3946 	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3947 	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3948 	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3949 	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3950 	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3951 	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3952 	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3953 };
3954 
3955 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3956 	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3957 };
3958 
3959 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3960 	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3961 };
3962 
3963 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3964 	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3965 };
3966 
3967 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3968 	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3969 	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3970 	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3971 	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3972 	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3973 	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3974 	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
3975 	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
3976 	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
3977 	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
3978 	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
3979 	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
3980 	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
3981 	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
3982 	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
3983 	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
3984 	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
3985 	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
3986 	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
3987 	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
3988 	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
3989 	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
3990 	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
3991 	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
3992 	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
3993 	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
3994 	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
3995 	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
3996 	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
3997 	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
3998 	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
3999 	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
4000 	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
4001 	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
4002 };
4003 
4004 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
4005 	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
4006 	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
4007 	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
4008 	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
4009 	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
4010 	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
4011 	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
4012 	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
4013 };
4014 
4015 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
4016 	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
4017 };
4018 
4019 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
4020 	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
4021 	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
4022 	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
4023 	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
4024 };
4025 
4026 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
4027 	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
4028 	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
4029 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
4030 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
4031 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
4032 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
4033 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
4034 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
4035 	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
4036 	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
4037 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
4038 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
4039 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
4040 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
4041 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
4042 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
4043 	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
4044 	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
4045 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
4046 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
4047 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
4048 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
4049 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
4050 };
4051 
4052 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
4053 	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
4054 	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
4055 	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
4056 	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
4057 };
4058 
4059 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
4060 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
4061 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
4062 	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
4063 	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
4064 	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
4065 };
4066 
4067 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
4068 	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
4069 };
4070 
4071 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
4072 	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
4073 };
4074 
4075 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
4076 	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
4077 };
4078 
4079 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
4080 	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
4081 	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
4082 	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
4083 	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
4084 	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
4085 	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
4086 	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
4087 	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
4088 	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
4089 	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
4090 	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
4091 	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
4092 	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
4093 	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
4094 	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
4095 	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
4096 	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
4097 	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
4098 	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
4099 	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
4100 	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
4101 	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
4102 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
4103 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
4104 	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
4105 };
4106 
4107 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
4108 	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
4109 	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
4110 	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
4111 };
4112 
4113 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
4114 	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
4115 	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
4116 	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
4117 	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
4118 	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
4119 	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
4120 	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
4121 	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
4122 	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
4123 	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
4124 	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
4125 	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
4126 	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
4127 	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
4128 	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
4129 	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
4130 	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
4131 	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
4132 	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
4133 	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
4134 	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
4135 	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
4136 	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
4137 	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
4138 	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
4139 	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
4140 	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
4141 	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
4142 	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
4143 	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
4144 	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
4145 	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
4146 	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
4147 	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
4148 	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
4149 	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
4150 	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
4151 	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
4152 	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
4153 	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
4154 	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
4155 	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
4156 	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
4157 	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
4158 	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
4159 	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
4160 	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
4161 	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
4162 	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
4163 	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4164 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4165 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4166 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4167 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4168 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4169 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4170 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4171 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4172 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4173 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4174 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4175 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4176 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4177 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4178 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4179 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4180 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4181 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4182 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4183 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4184 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4185 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4186 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4187 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4188 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4189 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4190 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4191 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4192 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4193 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4194 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4195 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4196 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4197 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4198 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4199 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4200 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4201 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4202 };
4203 
4204 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4205 	{AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
4206 };
4207 
4208 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4209 	{AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
4210 };
4211 
4212 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4213 	{AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
4214 };
4215 
4216 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4217 	{AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
4218 };
4219 
4220 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4221 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4222 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4223 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4224 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4225 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4226 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4227 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4228 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4229 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4230 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4231 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4232 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4233 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
4234 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
4235 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
4236 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
4237 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
4238 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
4239 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
4240 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
4241 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
4242 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
4243 };
4244 
4245 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
4246 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
4247 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4248 	    AMDGPU_GFX_RLC_MEM, 1},
4249 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
4250 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4251 	    AMDGPU_GFX_CP_MEM, 1},
4252 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
4253 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4254 	    AMDGPU_GFX_CP_MEM, 1},
4255 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
4256 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4257 	    AMDGPU_GFX_CP_MEM, 1},
4258 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
4259 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4260 	    AMDGPU_GFX_GDS_MEM, 1},
4261 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
4262 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4263 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4264 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
4265 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4266 	    AMDGPU_GFX_SPI_MEM, 1},
4267 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
4268 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4269 	    AMDGPU_GFX_SP_MEM, 4},
4270 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
4271 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4272 	    AMDGPU_GFX_SP_MEM, 4},
4273 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
4274 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4275 	    AMDGPU_GFX_SQ_MEM, 4},
4276 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
4277 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4278 	    AMDGPU_GFX_SQC_MEM, 4},
4279 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
4280 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4281 	    AMDGPU_GFX_TCX_MEM, 1},
4282 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
4283 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4284 	    AMDGPU_GFX_TCC_MEM, 1},
4285 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
4286 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4287 	    AMDGPU_GFX_TA_MEM, 4},
4288 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
4289 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4290 	    AMDGPU_GFX_TCI_MEM, 1},
4291 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
4292 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4293 	    AMDGPU_GFX_TCP_MEM, 4},
4294 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
4295 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4296 	    AMDGPU_GFX_TD_MEM, 4},
4297 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
4298 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4299 	    AMDGPU_GFX_GCEA_MEM, 1},
4300 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
4301 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4302 	    AMDGPU_GFX_LDS_MEM, 4},
4303 };
4304 
4305 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
4306 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
4307 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4308 	    AMDGPU_GFX_RLC_MEM, 1},
4309 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
4310 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4311 	    AMDGPU_GFX_CP_MEM, 1},
4312 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
4313 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4314 	    AMDGPU_GFX_CP_MEM, 1},
4315 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
4316 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4317 	    AMDGPU_GFX_CP_MEM, 1},
4318 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
4319 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4320 	    AMDGPU_GFX_GDS_MEM, 1},
4321 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
4322 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4323 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4324 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
4325 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4326 	    AMDGPU_GFX_SPI_MEM, 1},
4327 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
4328 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4329 	    AMDGPU_GFX_SP_MEM, 4},
4330 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
4331 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4332 	    AMDGPU_GFX_SP_MEM, 4},
4333 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
4334 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4335 	    AMDGPU_GFX_SQ_MEM, 4},
4336 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
4337 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4338 	    AMDGPU_GFX_SQC_MEM, 4},
4339 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
4340 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4341 	    AMDGPU_GFX_TCX_MEM, 1},
4342 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
4343 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4344 	    AMDGPU_GFX_TCC_MEM, 1},
4345 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
4346 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4347 	    AMDGPU_GFX_TA_MEM, 4},
4348 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
4349 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4350 	    AMDGPU_GFX_TCI_MEM, 1},
4351 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
4352 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4353 	    AMDGPU_GFX_TCP_MEM, 4},
4354 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
4355 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4356 	    AMDGPU_GFX_TD_MEM, 4},
4357 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
4358 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
4359 	    AMDGPU_GFX_TCA_MEM, 1},
4360 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
4361 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4362 	    AMDGPU_GFX_GCEA_MEM, 1},
4363 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
4364 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4365 	    AMDGPU_GFX_LDS_MEM, 4},
4366 };
4367 
4368 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
4369 					void *ras_error_status, int xcc_id)
4370 {
4371 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
4372 	unsigned long ce_count = 0, ue_count = 0;
4373 	uint32_t i, j, k;
4374 
4375 	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
4376 	struct amdgpu_smuio_mcm_config_info mcm_info = {
4377 		.socket_id = adev->smuio.funcs->get_socket_id(adev),
4378 		.die_id = xcc_id & 0x01 ? 1 : 0,
4379 	};
4380 
4381 	mutex_lock(&adev->grbm_idx_mutex);
4382 
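	/*
	 * For the first ARRAY_SIZE(gfx_v9_4_3_ce_reg_list) entries the CE and
	 * UE lists describe the same blocks, so one pass queries both counts;
	 * the loop below handles the UE-only tail of the UE list.
	 */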
4383 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4384 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4385 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4386 				/* no need to select if instance number is 1 */
4387 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4388 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4389 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4390 
4391 				amdgpu_ras_inst_query_ras_error_count(adev,
4392 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4393 					1,
4394 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
4395 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
4396 					GET_INST(GC, xcc_id),
4397 					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
4398 					&ce_count);
4399 
4400 				amdgpu_ras_inst_query_ras_error_count(adev,
4401 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4402 					1,
4403 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4404 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4405 					GET_INST(GC, xcc_id),
4406 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4407 					&ue_count);
4408 			}
4409 		}
4410 	}
4411 
4412 	/* handle extra register entries of UE */
4413 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4414 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4415 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4416 				/* no need to select if instance number is 1 */
4417 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4418 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4419 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4420 
4421 				amdgpu_ras_inst_query_ras_error_count(adev,
4422 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4423 					1,
4424 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4425 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4426 					GET_INST(GC, xcc_id),
4427 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4428 					&ue_count);
4429 			}
4430 		}
4431 	}
4432 
4433 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4434 			xcc_id);
4435 	mutex_unlock(&adev->grbm_idx_mutex);
4436 
4437 	/* the caller is expected to have initialized
4438 	 * err_data->ue_count and err_data->ce_count
4439 	 */
4440 	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4441 	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4442 }
4443 
4444 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4445 					void *ras_error_status, int xcc_id)
4446 {
4447 	uint32_t i, j, k;
4448 
4449 	mutex_lock(&adev->grbm_idx_mutex);
4450 
4451 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4452 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4453 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4454 				/* no need to select if instance number is 1 */
4455 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4456 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4457 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4458 
4459 				amdgpu_ras_inst_reset_ras_error_count(adev,
4460 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4461 					1,
4462 					GET_INST(GC, xcc_id));
4463 
4464 				amdgpu_ras_inst_reset_ras_error_count(adev,
4465 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4466 					1,
4467 					GET_INST(GC, xcc_id));
4468 			}
4469 		}
4470 	}
4471 
4472 	/* handle extra register entries of UE */
4473 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4474 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4475 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4476 				/* no need to select if instance number is 1 */
4477 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4478 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4479 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4480 
4481 				amdgpu_ras_inst_reset_ras_error_count(adev,
4482 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4483 					1,
4484 					GET_INST(GC, xcc_id));
4485 			}
4486 		}
4487 	}
4488 
4489 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4490 			xcc_id);
4491 	mutex_unlock(&adev->grbm_idx_mutex);
4492 }
4493 
4494 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
4495 					void *ras_error_status, int xcc_id)
4496 {
4497 	uint32_t i;
4498 	uint32_t data;
4499 
4500 	if (amdgpu_sriov_vf(adev))
4501 		return;
4502 
4503 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
4504 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
4505 			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
4506 
4507 	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
4508 	    (amdgpu_watchdog_timer.period < 1 ||
4509 	     amdgpu_watchdog_timer.period > 0x23)) {
4510 		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
4511 		amdgpu_watchdog_timer.period = 0x23;
4512 	}
4513 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
4514 			     amdgpu_watchdog_timer.period);
4515 
4516 	mutex_lock(&adev->grbm_idx_mutex);
4517 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4518 		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
4519 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
4520 	}
4521 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4522 			xcc_id);
4523 	mutex_unlock(&adev->grbm_idx_mutex);
4524 }
4525 
4526 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4527 					void *ras_error_status)
4528 {
4529 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4530 			gfx_v9_4_3_inst_query_ras_err_count);
4531 }
4532 
4533 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4534 {
4535 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4536 }
4537 
4538 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
4539 {
4540 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
4541 }
4542 
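/*
 * Emit @num_nop NOP dwords into the ring. A single NOP is written
 * directly; otherwise one PACKET3_NOP header absorbs as many of the
 * trailing dwords as its count field allows (up to 0x3ffe), and any
 * remaining dwords are single-dword NOPs.
 */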
4543 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
4544 {
4545 	/* Header itself is a NOP packet */
4546 	if (num_nop == 1) {
4547 		amdgpu_ring_write(ring, ring->funcs->nop);
4548 		return;
4549 	}
4550 
4551 	/* Max HW optimization up to 0x3ffe, followed by the remaining NOPs one at a time */
4552 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
4553 
4554 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
4555 	amdgpu_ring_insert_nop(ring, num_nop - 1);
4556 }
4557 
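/*
 * Print the register state captured by gfx_v9_4_3_ip_dump(): first the
 * core GC registers of every XCC instance, then, if captured, the
 * per-queue CP registers for each mec/pipe/queue combination.
 */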
4558 static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
4559 {
4560 	struct amdgpu_device *adev = ip_block->adev;
4561 	uint32_t i, j, k;
4562 	uint32_t xcc_id, xcc_offset, inst_offset;
4563 	uint32_t num_xcc, reg, num_inst;
4564 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4565 
4566 	if (!adev->gfx.ip_dump_core)
4567 		return;
4568 
4569 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4570 	drm_printf(p, "Number of Instances:%d\n", num_xcc);
4571 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4572 		xcc_offset = xcc_id * reg_count;
4573 		drm_printf(p, "\nInstance id:%d\n", xcc_id);
4574 		for (i = 0; i < reg_count; i++)
4575 			drm_printf(p, "%-50s \t 0x%08x\n",
4576 				   gc_reg_list_9_4_3[i].reg_name,
4577 				   adev->gfx.ip_dump_core[xcc_offset + i]);
4578 	}
4579 
4580 	/* print compute queue registers for all instances */
4581 	if (!adev->gfx.ip_dump_compute_queues)
4582 		return;
4583 
4584 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4585 		adev->gfx.mec.num_queue_per_pipe;
4586 
4587 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4588 	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
4589 		   num_xcc,
4590 		   adev->gfx.mec.num_mec,
4591 		   adev->gfx.mec.num_pipe_per_mec,
4592 		   adev->gfx.mec.num_queue_per_pipe);
4593 
4594 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4595 		xcc_offset = xcc_id * reg_count * num_inst;
4596 		inst_offset = 0;
4597 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4598 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4599 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4600 					drm_printf(p,
4601 						   "\nxcc:%d, mec:%d, pipe:%d, queue:%d\n",
4602 						   xcc_id, i, j, k);
4603 					for (reg = 0; reg < reg_count; reg++) {
4604 						if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
4605 						    regCP_MEC_ME1_HEADER_DUMP)
4606 							drm_printf(p,
4607 								   "%-50s \t 0x%08x\n",
4608 								   "regCP_MEC_ME2_HEADER_DUMP",
4609 								   adev->gfx.ip_dump_compute_queues
4610 								   [xcc_offset + inst_offset +
4611 								    reg]);
4612 						else
4613 							drm_printf(p,
4614 								   "%-50s \t 0x%08x\n",
4615 								   gc_cp_reg_list_9_4_3[reg].reg_name,
4616 								   adev->gfx.ip_dump_compute_queues
4617 								   [xcc_offset + inst_offset +
4618 								    reg]);
4619 					}
4620 					inst_offset += reg_count;
4621 				}
4622 			}
4623 		}
4624 	}
4625 }
4626 
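/*
 * Snapshot the core GC registers and the per-queue CP registers of all
 * XCC instances into the ip_dump buffers, selecting each compute queue
 * through GRBM under srbm_mutex. MEC1 and MEC2 share one register list,
 * so the ME1 header-dump offset is swapped for its ME2 counterpart when
 * reading MEC2 queues.
 */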
4627 static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
4628 {
4629 	struct amdgpu_device *adev = ip_block->adev;
4630 	uint32_t i, j, k;
4631 	uint32_t num_xcc, reg, num_inst;
4632 	uint32_t xcc_id, xcc_offset, inst_offset;
4633 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4634 
4635 	if (!adev->gfx.ip_dump_core)
4636 		return;
4637 
4638 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4639 
4640 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4641 		xcc_offset = xcc_id * reg_count;
4642 		for (i = 0; i < reg_count; i++)
4643 			adev->gfx.ip_dump_core[xcc_offset + i] =
4644 				RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
4645 								   GET_INST(GC, xcc_id)));
4646 	}
4647 
4648 	/* dump compute queue registers for all instances */
4649 	if (!adev->gfx.ip_dump_compute_queues)
4650 		return;
4651 
4652 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4653 		adev->gfx.mec.num_queue_per_pipe;
4654 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4655 	mutex_lock(&adev->srbm_mutex);
4656 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4657 		xcc_offset = xcc_id * reg_count * num_inst;
4658 		inst_offset = 0;
4659 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4660 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4661 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4662 					/* ME0 is reserved for GFX, so compute (CP) selection starts from ME1 */
4663 					soc15_grbm_select(adev, 1 + i, j, k, 0,
4664 							  GET_INST(GC, xcc_id));
4665 
4666 					for (reg = 0; reg < reg_count; reg++) {
4667 						if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
4668 						    regCP_MEC_ME1_HEADER_DUMP)
4669 							adev->gfx.ip_dump_compute_queues
4670 								[xcc_offset +
4671 								 inst_offset + reg] =
4672 								RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
4673 											regCP_MEC_ME2_HEADER_DUMP));
4674 						else
4675 							adev->gfx.ip_dump_compute_queues
4676 								[xcc_offset +
4677 								 inst_offset + reg] =
4678 								RREG32(SOC15_REG_ENTRY_OFFSET_INST(
4679 									       gc_cp_reg_list_9_4_3[reg],
4680 									       GET_INST(GC, xcc_id)));
4681 					}
4682 					inst_offset += reg_count;
4683 				}
4684 			}
4685 		}
4686 	}
4687 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4688 	mutex_unlock(&adev->srbm_mutex);
4689 }
4690 
4691 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
4692 {
4693 	/* Emit the cleaner shader */
4694 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
4695 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
4696 }
4697 
4698 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4699 	.name = "gfx_v9_4_3",
4700 	.early_init = gfx_v9_4_3_early_init,
4701 	.late_init = gfx_v9_4_3_late_init,
4702 	.sw_init = gfx_v9_4_3_sw_init,
4703 	.sw_fini = gfx_v9_4_3_sw_fini,
4704 	.hw_init = gfx_v9_4_3_hw_init,
4705 	.hw_fini = gfx_v9_4_3_hw_fini,
4706 	.suspend = gfx_v9_4_3_suspend,
4707 	.resume = gfx_v9_4_3_resume,
4708 	.is_idle = gfx_v9_4_3_is_idle,
4709 	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
4710 	.soft_reset = gfx_v9_4_3_soft_reset,
4711 	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4712 	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
4713 	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4714 	.dump_ip_state = gfx_v9_4_3_ip_dump,
4715 	.print_ip_state = gfx_v9_4_3_ip_print,
4716 };
4717 
4718 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4719 	.type = AMDGPU_RING_TYPE_COMPUTE,
4720 	.align_mask = 0xff,
4721 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4722 	.support_64bit_ptrs = true,
4723 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4724 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4725 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4726 	.emit_frame_size =
4727 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4728 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4729 		5 + /* hdp invalidate */
4730 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4731 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4732 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4733 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4734 		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4735 		7 + /* gfx_v9_4_3_emit_mem_sync */
4736 		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4737 		15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4738 		2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
4739 	.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
4740 	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4741 	.emit_fence = gfx_v9_4_3_ring_emit_fence,
4742 	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4743 	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4744 	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4745 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4746 	.test_ring = gfx_v9_4_3_ring_test_ring,
4747 	.test_ib = gfx_v9_4_3_ring_test_ib,
4748 	.insert_nop = gfx_v9_4_3_ring_insert_nop,
4749 	.pad_ib = amdgpu_ring_generic_pad_ib,
4750 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4751 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4752 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4753 	.soft_recovery = gfx_v9_4_3_ring_soft_recovery,
4754 	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4755 	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4756 	.reset = gfx_v9_4_3_reset_kcq,
4757 	.emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
4758 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
4759 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
4760 };
4761 
4762 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4763 	.type = AMDGPU_RING_TYPE_KIQ,
4764 	.align_mask = 0xff,
4765 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4766 	.support_64bit_ptrs = true,
4767 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4768 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4769 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4770 	.emit_frame_size =
4771 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4772 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4773 		5 + /* hdp invalidate */
4774 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4775 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4776 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4777 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4778 		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4779 	.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
4780 	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4781 	.test_ring = gfx_v9_4_3_ring_test_ring,
4782 	.insert_nop = amdgpu_ring_insert_nop,
4783 	.pad_ib = amdgpu_ring_generic_pad_ib,
4784 	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4785 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4786 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4787 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4788 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4789 };
4790 
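/* Hook up the KIQ and compute ring callbacks for every XCC instance. */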
4791 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4792 {
4793 	int i, j, num_xcc;
4794 
4795 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4796 	for (i = 0; i < num_xcc; i++) {
4797 		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4798 
4799 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
4800 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4801 					= &gfx_v9_4_3_ring_funcs_compute;
4802 	}
4803 }
4804 
4805 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4806 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4807 	.process = gfx_v9_4_3_eop_irq,
4808 };
4809 
4810 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4811 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4812 	.process = gfx_v9_4_3_priv_reg_irq,
4813 };
4814 
4815 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
4816 	.set = gfx_v9_4_3_set_bad_op_fault_state,
4817 	.process = gfx_v9_4_3_bad_op_irq,
4818 };
4819 
4820 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4821 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4822 	.process = gfx_v9_4_3_priv_inst_irq,
4823 };
4824 
4825 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4826 {
4827 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4828 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4829 
4830 	adev->gfx.priv_reg_irq.num_types = 1;
4831 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4832 
4833 	adev->gfx.bad_op_irq.num_types = 1;
4834 	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
4835 
4836 	adev->gfx.priv_inst_irq.num_types = 1;
4837 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4838 }
4839 
4840 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4841 {
4842 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4843 }
4844 
4846 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4847 {
4848 	/* 9.4.3 variants removed all the GDS internal memory;
4849 	 * only GWS opcodes, such as barrier and semaphore,
4850 	 * are supported in the kernel. */
4851 
4852 	/* init asic gds info */
4853 	adev->gds.gds_size = 0;
4854 	adev->gds.gds_compute_max_wave_id = 0;
4855 	adev->gds.gws_size = 64;
4856 	adev->gds.oa_size = 16;
4857 }
4858 
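/*
 * Program the user-defined inactive CU bitmap for the SE/SH currently
 * selected via GRBM; a zero bitmap leaves the register untouched.
 */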
4859 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4860 						 u32 bitmap, int xcc_id)
4861 {
4862 	u32 data;
4863 
4864 	if (!bitmap)
4865 		return;
4866 
4867 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4868 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4869 
4870 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4871 }
4872 
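/*
 * Return the active CU bitmap of the currently selected SE/SH by
 * combining the fuse-level and user-level inactive masks and inverting
 * the result, limited to max_cu_per_sh bits.
 */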
4873 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4874 {
4875 	u32 data, mask;
4876 
4877 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4878 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4879 
4880 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4881 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4882 
4883 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4884 
4885 	return (~data) & mask;
4886 }
4887 
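/*
 * Populate @cu_info (active CU count, per-SE/SH bitmaps, always-on CU
 * mask) for all XCC instances. When the CU count is symmetric across
 * shader engines, CPC harvesting relaunch and dispatch are disabled via
 * CP_CPC_DEBUG.
 */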
4888 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4889 				 struct amdgpu_cu_info *cu_info)
4890 {
4891 	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
4892 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
4893 	unsigned int disable_masks[4 * 4];
4894 	bool is_symmetric_cus;
4895 
4896 	if (!adev || !cu_info)
4897 		return -EINVAL;
4898 
4899 	/*
4900 	 * The disable_masks array holds 4 * 4 = 16 entries, which covers
4901 	 * all gfx9 ASICs (max_shader_engines * max_sh_per_se <= 16).
4902 	 */
4902 	if (adev->gfx.config.max_shader_engines *
4903 		adev->gfx.config.max_sh_per_se > 16)
4904 		return -EINVAL;
4905 
4906 	amdgpu_gfx_parse_disable_cu(adev, disable_masks,
4907 				    adev->gfx.config.max_shader_engines,
4908 				    adev->gfx.config.max_sh_per_se);
4909 
4910 	mutex_lock(&adev->grbm_idx_mutex);
4911 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4912 		is_symmetric_cus = true;
4913 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4914 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4915 				mask = 1;
4916 				ao_bitmap = 0;
4917 				counter = 0;
4918 				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4919 				gfx_v9_4_3_set_user_cu_inactive_bitmap(
4920 					adev,
4921 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4922 					xcc_id);
4923 				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
4924 
4925 				cu_info->bitmap[xcc_id][i][j] = bitmap;
4926 
4927 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4928 					if (bitmap & mask) {
4929 						if (counter < adev->gfx.config.max_cu_per_sh)
4930 							ao_bitmap |= mask;
4931 						counter++;
4932 					}
4933 					mask <<= 1;
4934 				}
4935 				active_cu_number += counter;
4936 				if (i < 2 && j < 2)
4937 					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4938 				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4939 			}
4940 			if (i && is_symmetric_cus && prev_counter != counter)
4941 				is_symmetric_cus = false;
4942 			prev_counter = counter;
4943 		}
4944 		if (is_symmetric_cus) {
4945 			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
4946 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
4947 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
4948 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
4949 		}
4950 		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4951 					    xcc_id);
4952 	}
4953 	mutex_unlock(&adev->grbm_idx_mutex);
4954 
4955 	cu_info->number = active_cu_number;
4956 	cu_info->ao_cu_mask = ao_cu_mask;
4957 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4958 
4959 	return 0;
4960 }
4961 
4962 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
4963 	.type = AMD_IP_BLOCK_TYPE_GFX,
4964 	.major = 9,
4965 	.minor = 4,
4966 	.rev = 3,
4967 	.funcs = &gfx_v9_4_3_ip_funcs,
4968 };
4969 
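/*
 * Partition-level resume: bring up constants, RLC (bare metal only) and
 * CP on each XCC instance selected in @inst_mask.
 */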
4970 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
4971 {
4972 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4973 	uint32_t tmp_mask;
4974 	int i, r;
4975 
4976 	/* TODO: Initialize golden regs */
4977 	/* gfx_v9_4_3_init_golden_registers(adev); */
4978 
4979 	tmp_mask = inst_mask;
4980 	for_each_inst(i, tmp_mask)
4981 		gfx_v9_4_3_xcc_constants_init(adev, i);
4982 
4983 	if (!amdgpu_sriov_vf(adev)) {
4984 		tmp_mask = inst_mask;
4985 		for_each_inst(i, tmp_mask) {
4986 			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
4987 			if (r)
4988 				return r;
4989 		}
4990 	}
4991 
4992 	tmp_mask = inst_mask;
4993 	for_each_inst(i, tmp_mask) {
4994 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
4995 		if (r)
4996 			return r;
4997 	}
4998 
4999 	return 0;
5000 }
5001 
5002 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
5003 {
5004 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5005 	int i;
5006 
5007 	for_each_inst(i, inst_mask)
5008 		gfx_v9_4_3_xcc_fini(adev, i);
5009 
5010 	return 0;
5011 }
5012 
5013 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
5014 	.suspend = &gfx_v9_4_3_xcp_suspend,
5015 	.resume = &gfx_v9_4_3_xcp_resume
5016 };
5017 
5018 struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
5019 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
5020 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
5021 };
5022 
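/*
 * RAS late init: run the common block late init, then bind the GFX
 * block to its ACA info; unwind the late init if the binding fails.
 */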
5023 static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
5024 {
5025 	int r;
5026 
5027 	r = amdgpu_ras_block_late_init(adev, ras_block);
5028 	if (r)
5029 		return r;
5030 
5031 	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
5032 				&gfx_v9_4_3_aca_info,
5033 				NULL);
5034 	if (r)
5035 		goto late_fini;
5036 
5037 	return 0;
5038 
5039 late_fini:
5040 	amdgpu_ras_block_late_fini(adev, ras_block);
5041 
5042 	return r;
5043 }
5044 
5045 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
5046 	.ras_block = {
5047 		.hw_ops = &gfx_v9_4_3_ras_ops,
5048 		.ras_late_init = &gfx_v9_4_3_ras_late_init,
5049 	},
5050 	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
5051 };
5052