xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c (revision 2845f512232de9e436b9e3b5529e906e62414013)
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 
25 #include "amdgpu.h"
26 #include "amdgpu_gfx.h"
27 #include "soc15.h"
28 #include "soc15d.h"
29 #include "soc15_common.h"
30 #include "vega10_enum.h"
31 
32 #include "v9_structs.h"
33 
34 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
35 
36 #include "gc/gc_9_4_3_offset.h"
37 #include "gc/gc_9_4_3_sh_mask.h"
38 
39 #include "gfx_v9_4_3.h"
40 #include "gfx_v9_4_3_cleaner_shader.h"
41 #include "amdgpu_xcp.h"
42 #include "amdgpu_aca.h"
43 
44 MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
45 MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
46 MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
47 MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
48 
49 #define GFX9_MEC_HPD_SIZE 4096
50 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
51 
52 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
53 #define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
54 
55 #define mmSMNAID_XCD0_MCA_SMU 0x36430400	/* SMN AID XCD0 */
56 #define mmSMNAID_XCD1_MCA_SMU 0x38430400	/* SMN AID XCD1 */
57 #define mmSMNXCD_XCD0_MCA_SMU 0x40430400	/* SMN XCD XCD0 */
58 
59 #define XCC_REG_RANGE_0_LOW  0x2000     /* XCC gfxdec0 lower Bound */
60 #define XCC_REG_RANGE_0_HIGH 0x3400     /* XCC gfxdec0 upper Bound */
61 #define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower Bound */
62 #define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper Bound */
63 
64 #define NORMALIZE_XCC_REG_OFFSET(offset) \
65 	((offset) & 0xFFFF)
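/*
 * Illustrative example (offset values assumed, not taken from a real
 * register trace): an absolute SOC15 offset of 0x4A200 for a remote
 * XCC instance masks down to 0xA200, which falls inside the gfxdec1
 * range [0xA000, 0x10000) and is treated as XCC-local. An offset that
 * masks to e.g. 0x1000 lies outside both ranges, so
 * gfx_v9_4_3_normalize_xcc_reg_offset() below returns it unchanged.
 */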
66 
67 static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
68 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
69 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
70 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
71 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
72 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
73 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
74 	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
75 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
76 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
77 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
78 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
79 	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
80 	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
81 	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
82 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
83 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
84 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
85 	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
86 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
87 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
88 	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
89 	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
90 	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
91 	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
92 	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
93 	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
94 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
95 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
96 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
97 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
98 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
99 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
100 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
101 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
102 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
103 	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
104 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
105 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
106 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
107 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
108 	/* cp header registers */
109 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
110 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
111 	/* SE status registers */
112 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
113 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
114 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
115 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
116 };
117 
118 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
119 	/* compute queue registers */
120 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
121 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
122 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
123 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
124 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
125 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
126 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
127 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
128 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
129 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
130 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
131 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
132 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
133 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
134 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
135 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
136 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
137 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
138 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
139 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
140 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
141 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
142 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
143 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
144 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
145 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
146 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
147 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
148 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
149 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
150 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
151 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
152 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
153 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
154 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
155 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
156 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
157 };
158 
159 struct amdgpu_gfx_ras gfx_v9_4_3_ras;
160 
161 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
162 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
163 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
164 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
165 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
166 				struct amdgpu_cu_info *cu_info);
167 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
168 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
169 
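/*
 * Layout of the SET_RESOURCES packet emitted by the function below
 * (8 dwords total, matching .set_resources_size in
 * gfx_v9_4_3_kiq_pm4_funcs):
 *   dw0: PACKET3 header          dw4: cleaner shader addr lo
 *   dw1: vmid mask / queue type  dw5: cleaner shader addr hi
 *   dw2: queue mask lo           dw6: oac mask
 *   dw3: queue mask hi           dw7: gds heap base/size
 */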
170 static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
171 				uint64_t queue_mask)
172 {
173 	struct amdgpu_device *adev = kiq_ring->adev;
174 	u64 shader_mc_addr;
175 
176 	/* Cleaner shader MC address */
177 	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
178 
179 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
180 	amdgpu_ring_write(kiq_ring,
181 		PACKET3_SET_RESOURCES_VMID_MASK(0) |
182 		/* vmid_mask:0, queue_type:0 (KIQ) */
183 		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
184 	amdgpu_ring_write(kiq_ring,
185 			lower_32_bits(queue_mask));	/* queue mask lo */
186 	amdgpu_ring_write(kiq_ring,
187 			upper_32_bits(queue_mask));	/* queue mask hi */
188 	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
189 	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
190 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
191 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
192 }
193 
194 static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
195 				 struct amdgpu_ring *ring)
196 {
197 	struct amdgpu_device *adev = kiq_ring->adev;
198 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
199 	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
200 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
201 
202 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
203 	/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
204 	amdgpu_ring_write(kiq_ring,
205 			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
206 			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
207 			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
208 			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
209 			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
210 			 /*queue_type: normal compute queue */
211 			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
212 			 /* alloc format: all_on_one_pipe */
213 			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
214 			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
215 			 /* num_queues: must be 1 */
216 			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
217 	amdgpu_ring_write(kiq_ring,
218 			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
219 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
220 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
221 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
222 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
223 }
224 
225 static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
226 				   struct amdgpu_ring *ring,
227 				   enum amdgpu_unmap_queues_action action,
228 				   u64 gpu_addr, u64 seq)
229 {
230 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
231 
232 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
233 	amdgpu_ring_write(kiq_ring, /* action, queue_sel, eng_sel, num_queues */
234 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
235 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
236 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
237 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
238 	amdgpu_ring_write(kiq_ring,
239 			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
240 
241 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
242 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
243 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
244 		amdgpu_ring_write(kiq_ring, seq);
245 	} else {
246 		amdgpu_ring_write(kiq_ring, 0);
247 		amdgpu_ring_write(kiq_ring, 0);
248 		amdgpu_ring_write(kiq_ring, 0);
249 	}
250 }
251 
252 static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
253 				   struct amdgpu_ring *ring,
254 				   u64 addr,
255 				   u64 seq)
256 {
257 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
258 
259 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
260 	amdgpu_ring_write(kiq_ring,
261 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
262 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
263 			  PACKET3_QUERY_STATUS_COMMAND(2));
264 	/* doorbell offset and engine select */
265 	amdgpu_ring_write(kiq_ring,
266 			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
267 			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
268 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
269 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
270 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
271 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
272 }
273 
274 static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
275 				uint16_t pasid, uint32_t flush_type,
276 				bool all_hub)
277 {
278 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
279 	amdgpu_ring_write(kiq_ring,
280 			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
281 			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
282 			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
283 			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
284 }
285 
286 static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
287 					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
288 					  uint32_t xcc_id, uint32_t vmid)
289 {
290 	struct amdgpu_device *adev = kiq_ring->adev;
291 	unsigned i;
292 
293 	/* enter safe mode */
294 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
295 	mutex_lock(&adev->srbm_mutex);
296 	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
297 
298 	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
299 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
300 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
301 		/* wait till dequeue takes effect */
302 		for (i = 0; i < adev->usec_timeout; i++) {
303 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
304 				break;
305 			udelay(1);
306 		}
307 		if (i >= adev->usec_timeout)
308 			dev_err(adev->dev, "failed to wait on hqd deactivation\n");
309 	} else {
310 		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
311 	}
312 
313 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
314 	mutex_unlock(&adev->srbm_mutex);
315 	/* exit safe mode */
316 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
317 }
318 
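/*
 * The *_size fields below are packet lengths in dwords, including the
 * PACKET3 header; e.g. map_queues_size = 7 corresponds to the header
 * plus the six payload dwords written in gfx_v9_4_3_kiq_map_queues(),
 * and invalidate_tlbs_size = 2 to the header plus one payload dword.
 */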
319 static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
320 	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
321 	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
322 	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
323 	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
324 	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
325 	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
326 	.set_resources_size = 8,
327 	.map_queues_size = 7,
328 	.unmap_queues_size = 6,
329 	.query_status_size = 7,
330 	.invalidate_tlbs_size = 2,
331 };
332 
333 static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
334 {
335 	int i, num_xcc;
336 
337 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
338 	for (i = 0; i < num_xcc; i++)
339 		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
340 }
341 
342 static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
343 {
344 	int i, num_xcc, dev_inst;
345 
346 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
347 	for (i = 0; i < num_xcc; i++) {
348 		dev_inst = GET_INST(GC, i);
349 
350 		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
351 			     GOLDEN_GB_ADDR_CONFIG);
352 		/* Golden settings applied by the driver for ASICs with rev_id 0 */
353 		if (adev->rev_id == 0) {
354 			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
355 					      REDUCE_FIFO_DEPTH_BY_2, 2);
356 		} else {
357 			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
358 						SPARE, 0x1);
359 		}
360 	}
361 }
362 
363 static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
364 {
365 	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);
366 
367 	/* If it is an XCC register, normalize the offset to keep the
368 	 * lower 16 bits local to the XCC. */
369 
370 	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
371 		((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
372 		return normalized_reg;
373 	else
374 		return reg;
375 }
376 
377 static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
378 				       bool wc, uint32_t reg, uint32_t val)
379 {
380 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
381 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
382 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
383 				WRITE_DATA_DST_SEL(0) |
384 				(wc ? WR_CONFIRM : 0));
385 	amdgpu_ring_write(ring, reg);
386 	amdgpu_ring_write(ring, 0);
387 	amdgpu_ring_write(ring, val);
388 }
389 
390 static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
391 				  int mem_space, int opt, uint32_t addr0,
392 				  uint32_t addr1, uint32_t ref, uint32_t mask,
393 				  uint32_t inv)
394 {
395 	/* Only normalize offsets in register space */
396 	if (mem_space == 0) {
397 		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
398 		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
399 	}
400 
401 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
402 	amdgpu_ring_write(ring,
403 				 /* memory (1) or register (0) */
404 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
405 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
406 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
407 				 WAIT_REG_MEM_ENGINE(eng_sel)));
408 
409 	if (mem_space)
410 		BUG_ON(addr0 & 0x3); /* Dword align */
411 	amdgpu_ring_write(ring, addr0);
412 	amdgpu_ring_write(ring, addr1);
413 	amdgpu_ring_write(ring, ref);
414 	amdgpu_ring_write(ring, mask);
415 	amdgpu_ring_write(ring, inv); /* poll interval */
416 }
417 
418 static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
419 {
420 	uint32_t scratch_reg0_offset, xcc_offset;
421 	struct amdgpu_device *adev = ring->adev;
422 	uint32_t tmp = 0;
423 	unsigned i;
424 	int r;
425 
426 	/* Use register offset which is local to XCC in the packet */
427 	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
428 	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
429 	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
430 	tmp = RREG32(scratch_reg0_offset);
431 
432 	r = amdgpu_ring_alloc(ring, 3);
433 	if (r)
434 		return r;
435 
436 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
437 	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
438 	amdgpu_ring_write(ring, 0xDEADBEEF);
439 	amdgpu_ring_commit(ring);
440 
441 	for (i = 0; i < adev->usec_timeout; i++) {
442 		tmp = RREG32(scratch_reg0_offset);
443 		if (tmp == 0xDEADBEEF)
444 			break;
445 		udelay(1);
446 	}
447 
448 	if (i >= adev->usec_timeout)
449 		r = -ETIMEDOUT;
450 	return r;
451 }
452 
453 static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
454 {
455 	struct amdgpu_device *adev = ring->adev;
456 	struct amdgpu_ib ib;
457 	struct dma_fence *f = NULL;
458 
459 	unsigned index;
460 	uint64_t gpu_addr;
461 	uint32_t tmp;
462 	long r;
463 
464 	r = amdgpu_device_wb_get(adev, &index);
465 	if (r)
466 		return r;
467 
468 	gpu_addr = adev->wb.gpu_addr + (index * 4);
469 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
470 	memset(&ib, 0, sizeof(ib));
471 
472 	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
473 	if (r)
474 		goto err1;
475 
476 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
477 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
478 	ib.ptr[2] = lower_32_bits(gpu_addr);
479 	ib.ptr[3] = upper_32_bits(gpu_addr);
480 	ib.ptr[4] = 0xDEADBEEF;
481 	ib.length_dw = 5;
482 
483 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
484 	if (r)
485 		goto err2;
486 
487 	r = dma_fence_wait_timeout(f, false, timeout);
488 	if (r == 0) {
489 		r = -ETIMEDOUT;
490 		goto err2;
491 	} else if (r < 0) {
492 		goto err2;
493 	}
494 
495 	tmp = adev->wb.wb[index];
496 	if (tmp == 0xDEADBEEF)
497 		r = 0;
498 	else
499 		r = -EINVAL;
500 
501 err2:
502 	amdgpu_ib_free(adev, &ib, NULL);
503 	dma_fence_put(f);
504 err1:
505 	amdgpu_device_wb_free(adev, index);
506 	return r;
507 }
508 
509 
510 /* This value might differ per partition */
511 static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
512 {
513 	uint64_t clock;
514 
515 	mutex_lock(&adev->gfx.gpu_clock_mutex);
516 	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
517 	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
518 		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
519 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
520 
521 	return clock;
522 }
523 
524 static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
525 {
526 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
527 	amdgpu_ucode_release(&adev->gfx.me_fw);
528 	amdgpu_ucode_release(&adev->gfx.ce_fw);
529 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
530 	amdgpu_ucode_release(&adev->gfx.mec_fw);
531 	amdgpu_ucode_release(&adev->gfx.mec2_fw);
532 
533 	kfree(adev->gfx.rlc.register_list_format);
534 }
535 
536 static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
537 					  const char *chip_name)
538 {
539 	int err;
540 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
541 	uint16_t version_major;
542 	uint16_t version_minor;
543 
544 
545 	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
546 				   "amdgpu/%s_rlc.bin", chip_name);
547 	if (err)
548 		goto out;
549 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
550 
551 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
552 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
553 	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
554 out:
555 	if (err)
556 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
557 
558 	return err;
559 }
560 
561 static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
562 {
563 	return true;
564 }
565 
566 static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
567 {
568 	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
569 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
570 }
571 
572 static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
573 					  const char *chip_name)
574 {
575 	int err;
576 
577 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
578 				   "amdgpu/%s_mec.bin", chip_name);
579 	if (err)
580 		goto out;
581 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
582 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
583 
584 	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
585 	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
586 
587 	gfx_v9_4_3_check_if_need_gfxoff(adev);
588 
589 out:
590 	if (err)
591 		amdgpu_ucode_release(&adev->gfx.mec_fw);
592 	return err;
593 }
594 
595 static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
596 {
597 	char ucode_prefix[15];
598 	int r;
599 
600 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
601 
602 	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
603 	if (r)
604 		return r;
605 
606 	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
607 	if (r)
608 		return r;
609 
610 	return r;
611 }
612 
613 static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
614 {
615 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
616 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
617 }
618 
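/*
 * Illustrative sizing (ring/XCC counts assumed for the example): with
 * 8 compute rings per XCC and 4 XCCs, mec_hpd_size below is
 * 8 * 4 * GFX9_MEC_HPD_SIZE = 8 * 4 * 4096 bytes = 128 KiB, i.e. one
 * HPD EOP buffer slot per potential compute ring.
 */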
619 static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
620 {
621 	int r, i, num_xcc;
622 	u32 *hpd;
623 	const __le32 *fw_data;
624 	unsigned fw_size;
625 	u32 *fw;
626 	size_t mec_hpd_size;
627 
628 	const struct gfx_firmware_header_v1_0 *mec_hdr;
629 
630 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
631 	for (i = 0; i < num_xcc; i++)
632 		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
633 			AMDGPU_MAX_COMPUTE_QUEUES);
634 
635 	/* take ownership of the relevant compute queues */
636 	amdgpu_gfx_compute_queue_acquire(adev);
637 	mec_hpd_size =
638 		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
639 	if (mec_hpd_size) {
640 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
641 					      AMDGPU_GEM_DOMAIN_VRAM |
642 					      AMDGPU_GEM_DOMAIN_GTT,
643 					      &adev->gfx.mec.hpd_eop_obj,
644 					      &adev->gfx.mec.hpd_eop_gpu_addr,
645 					      (void **)&hpd);
646 		if (r) {
647 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
648 			gfx_v9_4_3_mec_fini(adev);
649 			return r;
650 		}
651 
652 		if (amdgpu_emu_mode == 1) {
653 			for (i = 0; i < mec_hpd_size / 4; i++) {
654 				memset((void *)(hpd + i), 0, 4);
655 				if (i % 50 == 0)
656 					msleep(1);
657 			}
658 		} else {
659 			memset(hpd, 0, mec_hpd_size);
660 		}
661 
662 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
663 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
664 	}
665 
666 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
667 
668 	fw_data = (const __le32 *)
669 		(adev->gfx.mec_fw->data +
670 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
671 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
672 
673 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
674 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
675 				      &adev->gfx.mec.mec_fw_obj,
676 				      &adev->gfx.mec.mec_fw_gpu_addr,
677 				      (void **)&fw);
678 	if (r) {
679 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
680 		gfx_v9_4_3_mec_fini(adev);
681 		return r;
682 	}
683 
684 	memcpy(fw, fw_data, fw_size);
685 
686 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
687 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
688 
689 	return 0;
690 }
691 
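/*
 * GRBM_GFX_INDEX helper: passing 0xffffffff for a parameter selects
 * broadcast writes for that field. For example,
 *   gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff,
 *                               0xffffffff, xcc_id);
 * restores broadcast to all SEs/SHs/instances, as done after the
 * per-SE RLC serdes polling further below.
 */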
692 static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
693 					u32 sh_num, u32 instance, int xcc_id)
694 {
695 	u32 data;
696 
697 	if (instance == 0xffffffff)
698 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
699 				     INSTANCE_BROADCAST_WRITES, 1);
700 	else
701 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
702 				     INSTANCE_INDEX, instance);
703 
704 	if (se_num == 0xffffffff)
705 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
706 				     SE_BROADCAST_WRITES, 1);
707 	else
708 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
709 
710 	if (sh_num == 0xffffffff)
711 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
712 				     SH_BROADCAST_WRITES, 1);
713 	else
714 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
715 
716 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
717 }
718 
719 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
720 {
721 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
722 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
723 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
724 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
725 		(SQ_IND_INDEX__FORCE_READ_MASK));
726 	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
727 }
728 
729 static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
730 			   uint32_t wave, uint32_t thread,
731 			   uint32_t regno, uint32_t num, uint32_t *out)
732 {
733 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
734 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
735 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
736 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
737 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
738 		(SQ_IND_INDEX__FORCE_READ_MASK) |
739 		(SQ_IND_INDEX__AUTO_INCR_MASK));
740 	while (num--)
741 		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
742 }
743 
744 static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
745 				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
746 				      uint32_t *dst, int *no_fields)
747 {
748 	/* type 1 wave data */
749 	dst[(*no_fields)++] = 1;
750 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
751 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
752 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
753 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
754 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
755 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
756 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
757 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
758 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
759 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
760 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
761 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
762 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
763 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
764 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
765 }
766 
767 static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
768 				       uint32_t wave, uint32_t start,
769 				       uint32_t size, uint32_t *dst)
770 {
771 	wave_read_regs(adev, xcc_id, simd, wave, 0,
772 		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
773 }
774 
775 static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
776 				       uint32_t wave, uint32_t thread,
777 				       uint32_t start, uint32_t size,
778 				       uint32_t *dst)
779 {
780 	wave_read_regs(adev, xcc_id, simd, wave, thread,
781 		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
782 }
783 
784 static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
785 					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
786 {
787 	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
788 }
789 
790 static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
791 {
792 	u32 xcp_ctl;
793 
794 	/* Value is expected to be the same on all XCCs, fetch from the first instance */
795 	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);
796 
797 	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
798 }
799 
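/*
 * Example partitioning (XCC count assumed): with 8 XCCs and
 * num_xccs_per_xcp = 2, the PSP path requests 8 / 2 = 4 spatial
 * partitions, while the direct register path programs VIRTUAL_XCC_ID
 * as i % 2, i.e. 0, 1, 0, 1, ... across the XCC instances.
 */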
800 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
801 						int num_xccs_per_xcp)
802 {
803 	int ret, i, num_xcc;
804 	u32 tmp = 0;
805 
806 	if (adev->psp.funcs) {
807 		ret = psp_spatial_partition(&adev->psp,
808 					    NUM_XCC(adev->gfx.xcc_mask) /
809 						    num_xccs_per_xcp);
810 		if (ret)
811 			return ret;
812 	} else {
813 		num_xcc = NUM_XCC(adev->gfx.xcc_mask);
814 
815 		for (i = 0; i < num_xcc; i++) {
816 			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
817 					    num_xccs_per_xcp);
818 			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
819 					    i % num_xccs_per_xcp);
820 			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
821 				     tmp);
822 		}
823 		ret = 0;
824 	}
825 
826 	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
827 
828 	return ret;
829 }
830 
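/*
 * Illustrative mapping (xcc_mask assumed): IH nodes come in pairs per
 * XCC, so for ih_node = 4 and xcc_mask = 0b1111, GENMASK(2, 0) keeps
 * the low three mask bits, hweight8() counts 3 set bits, and the
 * function below returns logical XCC 2. Sparse masks work naturally
 * because only populated XCCs contribute to the popcount.
 */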
831 static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
832 {
833 	int xcc;
834 
835 	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
836 	if (!xcc) {
837 		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
838 		return -EINVAL;
839 	}
840 
841 	return xcc - 1;
842 }
843 
844 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
845 	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
846 	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
847 	.read_wave_data = &gfx_v9_4_3_read_wave_data,
848 	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
849 	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
850 	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
851 	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
852 	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
853 	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
854 };
855 
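/*
 * Note on the INSTANCEIDLO decode used below: GENMASK(31, 1) clears
 * bit 0 of the instance id so the compare against the mmSMN*_MCA_SMU
 * base addresses is insensitive to the lowest bit; die_id is then
 * rewritten to the XCD index (0 or 1) for gfx banks.
 */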
856 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
857 				      struct aca_bank *bank, enum aca_smu_type type,
858 				      void *data)
859 {
860 	struct aca_bank_info info;
861 	u64 misc0;
862 	u32 instlo;
863 	int ret;
864 
865 	ret = aca_bank_info_decode(bank, &info);
866 	if (ret)
867 		return ret;
868 
869 	/* NOTE: overwrite info.die_id with xcd id for gfx */
870 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
871 	instlo &= GENMASK(31, 1);
872 	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
873 
874 	misc0 = bank->regs[ACA_REG_IDX_MISC0];
875 
876 	switch (type) {
877 	case ACA_SMU_TYPE_UE:
878 		ret = aca_error_cache_log_bank_error(handle, &info,
879 						     ACA_ERROR_TYPE_UE, 1ULL);
880 		break;
881 	case ACA_SMU_TYPE_CE:
882 		ret = aca_error_cache_log_bank_error(handle, &info,
883 						     ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
884 		break;
885 	default:
886 		return -EINVAL;
887 	}
888 
889 	return ret;
890 }
891 
892 static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
893 					 enum aca_smu_type type, void *data)
894 {
895 	u32 instlo;
896 
897 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
898 	instlo &= GENMASK(31, 1);
899 	switch (instlo) {
900 	case mmSMNAID_XCD0_MCA_SMU:
901 	case mmSMNAID_XCD1_MCA_SMU:
902 	case mmSMNXCD_XCD0_MCA_SMU:
903 		return true;
904 	default:
905 		break;
906 	}
907 
908 	return false;
909 }
910 
911 static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
912 	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
913 	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
914 };
915 
916 static const struct aca_info gfx_v9_4_3_aca_info = {
917 	.hwip = ACA_HWIP_TYPE_SMU,
918 	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
919 	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
920 };
921 
922 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
923 {
924 	u32 gb_addr_config;
925 
926 	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
927 	adev->gfx.ras = &gfx_v9_4_3_ras;
928 
929 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
930 	case IP_VERSION(9, 4, 3):
931 	case IP_VERSION(9, 4, 4):
932 		adev->gfx.config.max_hw_contexts = 8;
933 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
934 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
935 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
936 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
937 		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
938 		break;
939 	default:
940 		BUG();
941 		break;
942 	}
943 
944 	adev->gfx.config.gb_addr_config = gb_addr_config;
945 
946 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
947 			REG_GET_FIELD(
948 					adev->gfx.config.gb_addr_config,
949 					GB_ADDR_CONFIG,
950 					NUM_PIPES);
951 
952 	adev->gfx.config.max_tile_pipes =
953 		adev->gfx.config.gb_addr_config_fields.num_pipes;
954 
955 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
956 			REG_GET_FIELD(
957 					adev->gfx.config.gb_addr_config,
958 					GB_ADDR_CONFIG,
959 					NUM_BANKS);
960 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
961 			REG_GET_FIELD(
962 					adev->gfx.config.gb_addr_config,
963 					GB_ADDR_CONFIG,
964 					MAX_COMPRESSED_FRAGS);
965 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
966 			REG_GET_FIELD(
967 					adev->gfx.config.gb_addr_config,
968 					GB_ADDR_CONFIG,
969 					NUM_RB_PER_SE);
970 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
971 			REG_GET_FIELD(
972 					adev->gfx.config.gb_addr_config,
973 					GB_ADDR_CONFIG,
974 					NUM_SHADER_ENGINES);
975 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
976 			REG_GET_FIELD(
977 					adev->gfx.config.gb_addr_config,
978 					GB_ADDR_CONFIG,
979 					PIPE_INTERLEAVE_SIZE));
980 
981 	return 0;
982 }
983 
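/*
 * Ring addressing example (indices assumed): for xcc_id = 1,
 * ring_id = 2 and num_compute_rings = 8, the ring set up below is
 * compute_ring[1 * 8 + 2]; its doorbell index is
 * (mec_ring0 + 1 * xcc_doorbell_range + 2) << 1 (doorbells are 64-bit
 * slots, hence the shift) and its EOP buffer starts at
 * hpd_eop_gpu_addr + (2 + 1 * 8) * GFX9_MEC_HPD_SIZE.
 */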
984 static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
985 				        int xcc_id, int mec, int pipe, int queue)
986 {
987 	unsigned irq_type;
988 	struct amdgpu_ring *ring;
989 	unsigned int hw_prio;
990 	uint32_t xcc_doorbell_start;
991 
992 	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
993 				       ring_id];
994 
995 	/* mec0 is me1 */
996 	ring->xcc_id = xcc_id;
997 	ring->me = mec + 1;
998 	ring->pipe = pipe;
999 	ring->queue = queue;
1000 
1001 	ring->ring_obj = NULL;
1002 	ring->use_doorbell = true;
1003 	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
1004 			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
1005 	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
1006 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
1007 			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
1008 				     GFX9_MEC_HPD_SIZE;
1009 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
1010 	sprintf(ring->name, "comp_%d.%d.%d.%d",
1011 			ring->xcc_id, ring->me, ring->pipe, ring->queue);
1012 
1013 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1014 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1015 		+ ring->pipe;
1016 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1017 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
1018 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1019 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1020 				hw_prio, NULL);
1021 }
1022 
1023 static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
1024 {
1025 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
1026 	uint32_t *ptr, num_xcc, inst;
1027 
1028 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1029 
1030 	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
1031 	if (!ptr) {
1032 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1033 		adev->gfx.ip_dump_core = NULL;
1034 	} else {
1035 		adev->gfx.ip_dump_core = ptr;
1036 	}
1037 
1038 	/* Allocate memory for compute queue registers for all the instances */
1039 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
1040 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1041 		adev->gfx.mec.num_queue_per_pipe;
1042 
1043 	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
1044 	if (!ptr) {
1045 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1046 		adev->gfx.ip_dump_compute_queues = NULL;
1047 	} else {
1048 		adev->gfx.ip_dump_compute_queues = ptr;
1049 	}
1050 }
1051 
1052 static int gfx_v9_4_3_sw_init(void *handle)
1053 {
1054 	int i, j, k, r, ring_id, xcc_id, num_xcc;
1055 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1056 
1057 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1058 	case IP_VERSION(9, 4, 3):
1059 	case IP_VERSION(9, 4, 4):
1060 		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
1061 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
1062 		if (adev->gfx.mec_fw_version >= 153) {
1063 			adev->gfx.enable_cleaner_shader = true;
1064 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1065 			if (r) {
1066 				adev->gfx.enable_cleaner_shader = false;
1067 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1068 			}
1069 		}
1070 		break;
1071 	default:
1072 		adev->gfx.enable_cleaner_shader = false;
1073 		break;
1074 	}
1075 
1076 	adev->gfx.mec.num_mec = 2;
1077 	adev->gfx.mec.num_pipe_per_mec = 4;
1078 	adev->gfx.mec.num_queue_per_pipe = 8;
1079 
1080 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1081 
1082 	/* EOP Event */
1083 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1084 	if (r)
1085 		return r;
1086 
1087 	/* Bad opcode Event */
1088 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
1089 			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
1090 			      &adev->gfx.bad_op_irq);
1091 	if (r)
1092 		return r;
1093 
1094 	/* Privileged reg */
1095 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1096 			      &adev->gfx.priv_reg_irq);
1097 	if (r)
1098 		return r;
1099 
1100 	/* Privileged inst */
1101 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1102 			      &adev->gfx.priv_inst_irq);
1103 	if (r)
1104 		return r;
1105 
1106 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1107 
1108 	r = adev->gfx.rlc.funcs->init(adev);
1109 	if (r) {
1110 		DRM_ERROR("Failed to init rlc BOs!\n");
1111 		return r;
1112 	}
1113 
1114 	r = gfx_v9_4_3_mec_init(adev);
1115 	if (r) {
1116 		DRM_ERROR("Failed to init MEC BOs!\n");
1117 		return r;
1118 	}
1119 
1120 	/* set up the compute queues - allocate horizontally across pipes */
1121 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1122 		ring_id = 0;
1123 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1124 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1125 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
1126 				     k++) {
1127 					if (!amdgpu_gfx_is_mec_queue_enabled(
1128 							adev, xcc_id, i, k, j))
1129 						continue;
1130 
1131 					r = gfx_v9_4_3_compute_ring_init(adev,
1132 								       ring_id,
1133 								       xcc_id,
1134 								       i, k, j);
1135 					if (r)
1136 						return r;
1137 
1138 					ring_id++;
1139 				}
1140 			}
1141 		}
1142 
1143 		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
1144 		if (r) {
1145 			DRM_ERROR("Failed to init KIQ BOs!\n");
1146 			return r;
1147 		}
1148 
1149 		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1150 		if (r)
1151 			return r;
1152 
1153 		/* create MQD for all compute queues as well as KIQ for SRIOV case */
1154 		r = amdgpu_gfx_mqd_sw_init(adev,
1155 				sizeof(struct v9_mqd_allocation), xcc_id);
1156 		if (r)
1157 			return r;
1158 	}
1159 
1160 	r = gfx_v9_4_3_gpu_early_init(adev);
1161 	if (r)
1162 		return r;
1163 
1164 	r = amdgpu_gfx_ras_sw_init(adev);
1165 	if (r)
1166 		return r;
1167 
1168 
1169 	if (!amdgpu_sriov_vf(adev)) {
1170 		r = amdgpu_gfx_sysfs_init(adev);
1171 		if (r)
1172 			return r;
1173 	}
1174 
1175 	gfx_v9_4_3_alloc_ip_dump(adev);
1176 
1177 	r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
1178 	if (r)
1179 		return r;
1180 
1181 	return 0;
1182 }
1183 
1184 static int gfx_v9_4_3_sw_fini(void *handle)
1185 {
1186 	int i, num_xcc;
1187 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1188 
1189 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1190 	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
1191 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1192 
1193 	for (i = 0; i < num_xcc; i++) {
1194 		amdgpu_gfx_mqd_sw_fini(adev, i);
1195 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
1196 		amdgpu_gfx_kiq_fini(adev, i);
1197 	}
1198 
1199 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
1200 
1201 	gfx_v9_4_3_mec_fini(adev);
1202 	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
1203 	gfx_v9_4_3_free_microcode(adev);
1204 	if (!amdgpu_sriov_vf(adev))
1205 		amdgpu_gfx_sysfs_fini(adev);
1206 	amdgpu_gfx_sysfs_isolation_shader_fini(adev);
1207 
1208 	kfree(adev->gfx.ip_dump_core);
1209 	kfree(adev->gfx.ip_dump_compute_queues);
1210 
1211 	return 0;
1212 }
1213 
1214 #define DEFAULT_SH_MEM_BASES	(0x6000)
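/*
 * Packing example for the define above: sh_mem_bases below becomes
 * 0x6000 | (0x6000 << 16) = 0x60006000, i.e. both the private and the
 * shared aperture select VA[63:48] = 0x6000, which is exactly where
 * the LDS/scratch/GPUVM apertures listed in the next comment live.
 */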
1215 static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
1216 					     int xcc_id)
1217 {
1218 	int i;
1219 	uint32_t sh_mem_config;
1220 	uint32_t sh_mem_bases;
1221 	uint32_t data;
1222 
1223 	/*
1224 	 * Configure apertures:
1225 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1226 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1227 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1228 	 */
1229 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1230 
1231 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1232 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1233 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1234 
1235 	mutex_lock(&adev->srbm_mutex);
1236 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1237 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1238 		/* CP and shaders */
1239 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
1240 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
1241 
1242 		/* Enable trap for each kfd vmid. */
1243 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
1244 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1245 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
1246 	}
1247 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1248 	mutex_unlock(&adev->srbm_mutex);
1249 
1250 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
1251 	 * access. These should be enabled by FW for target VMIDs. */
1252 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1253 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
1254 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
1255 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
1256 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
1257 	}
1258 }
1259 
1260 static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
1261 {
1262 	int vmid;
1263 
1264 	/*
1265 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1266 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1267 	 * the driver can enable them for graphics. VMID0 should maintain
1268 	 * access so that HWS firmware can save/restore entries.
1269 	 */
1270 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
1271 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
1272 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
1273 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
1274 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
1275 	}
1276 }
1277 
1278 static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
1279 					  int xcc_id)
1280 {
1281 	u32 tmp;
1282 	int i;
1283 
1284 	/* XXX SH_MEM regs */
1285 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1286 	mutex_lock(&adev->srbm_mutex);
1287 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1288 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1289 		/* CP and shaders */
1290 		if (i == 0) {
1291 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1292 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1293 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1294 					    !!adev->gmc.noretry);
1295 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1296 					 regSH_MEM_CONFIG, tmp);
1297 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1298 					 regSH_MEM_BASES, 0);
1299 		} else {
1300 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1301 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1302 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1303 					    !!adev->gmc.noretry);
1304 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1305 					 regSH_MEM_CONFIG, tmp);
1306 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1307 					    (adev->gmc.private_aperture_start >>
1308 					     48));
1309 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1310 					    (adev->gmc.shared_aperture_start >>
1311 					     48));
1312 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1313 					 regSH_MEM_BASES, tmp);
1314 		}
1315 	}
1316 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1317 
1318 	mutex_unlock(&adev->srbm_mutex);
1319 
1320 	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
1321 	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
1322 }
1323 
1324 static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
1325 {
1326 	int i, num_xcc;
1327 
1328 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1329 
1330 	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
1331 	adev->gfx.config.db_debug2 =
1332 		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
1333 
1334 	for (i = 0; i < num_xcc; i++)
1335 		gfx_v9_4_3_xcc_constants_init(adev, i);
1336 }
1337 
1338 static void
1339 gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
1340 					   int xcc_id)
1341 {
1342 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
1343 }
1344 
1345 static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
1346 {
1347 	 * The RLC save/restore list is available since v2_1
1348 	 * and is needed by the gfxoff feature.
1349 	 * And it's needed by gfxoff feature.
1350 	 */
1351 	if (adev->gfx.rlc.is_rlc_v2_1)
1352 		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
1353 }
1354 
1355 static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
1356 {
1357 	uint32_t data;
1358 
1359 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
1360 	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
1361 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
1362 }
1363 
1364 static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
1365 {
1366 	uint32_t rlc_setting;
1367 
1368 	/* if RLC is not enabled, do nothing */
1369 	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
1370 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
1371 		return false;
1372 
1373 	return true;
1374 }
1375 
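/*
 * RLC safe-mode handshake: the driver writes CMD together with
 * MESSAGE = 1 (enter) to RLC_SAFE_MODE and the RLC firmware
 * acknowledges by clearing the CMD field, which the loop below polls
 * for. The unset variant writes CMD with MESSAGE = 0 and does not
 * wait for an acknowledgement.
 */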
1376 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
1377 {
1378 	uint32_t data;
1379 	unsigned i;
1380 
1381 	data = RLC_SAFE_MODE__CMD_MASK;
1382 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
1383 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1384 
1385 	/* wait for RLC_SAFE_MODE */
1386 	for (i = 0; i < adev->usec_timeout; i++) {
1387 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1388 			break;
1389 		udelay(1);
1390 	}
1391 }
1392 
1393 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
1394 					   int xcc_id)
1395 {
1396 	uint32_t data;
1397 
1398 	data = RLC_SAFE_MODE__CMD_MASK;
1399 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1400 }
1401 
1402 static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1403 {
1404 	int xcc_id, num_xcc;
1405 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1406 
1407 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1408 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1409 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
1410 		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
1411 		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
1412 		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
1413 		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
1414 		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
1415 		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
1416 		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
1417 	}
1418 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1419 }
1420 
1421 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
1422 {
1423 	/* init spm vmid with 0xf */
1424 	if (adev->gfx.rlc.funcs->update_spm_vmid)
1425 		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
1426 
1427 	return 0;
1428 }
1429 
1430 static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
1431 					       int xcc_id)
1432 {
1433 	u32 i, j, k;
1434 	u32 mask;
1435 
1436 	mutex_lock(&adev->grbm_idx_mutex);
1437 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1438 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1439 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
1440 						    xcc_id);
1441 			for (k = 0; k < adev->usec_timeout; k++) {
1442 				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
1443 					break;
1444 				udelay(1);
1445 			}
1446 			if (k == adev->usec_timeout) {
1447 				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
1448 							    0xffffffff,
1449 							    0xffffffff, xcc_id);
1450 				mutex_unlock(&adev->grbm_idx_mutex);
1451 				DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
1452 					 i, j);
1453 				return;
1454 			}
1455 		}
1456 	}
1457 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
1458 				    xcc_id);
1459 	mutex_unlock(&adev->grbm_idx_mutex);
1460 
1461 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1462 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1463 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1464 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1465 	for (k = 0; k < adev->usec_timeout; k++) {
1466 		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1467 			break;
1468 		udelay(1);
1469 	}
1470 }
1471 
1472 static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1473 						     bool enable, int xcc_id)
1474 {
1475 	u32 tmp;
1476 
1477 	/* These interrupts should be enabled to drive DS clock */
1478 
1479 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1480 
1481 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1482 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1483 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1484 
1485 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1486 }
1487 
1488 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1489 {
1490 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1491 			      RLC_ENABLE_F32, 0);
1492 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1493 	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1494 }
1495 
1496 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1497 {
1498 	int i, num_xcc;
1499 
1500 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1501 	for (i = 0; i < num_xcc; i++)
1502 		gfx_v9_4_3_xcc_rlc_stop(adev, i);
1503 }
1504 
1505 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1506 {
1507 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1508 			      SOFT_RESET_RLC, 1);
1509 	udelay(50);
1510 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1511 			      SOFT_RESET_RLC, 0);
1512 	udelay(50);
1513 }
1514 
1515 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1516 {
1517 	int i, num_xcc;
1518 
1519 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1520 	for (i = 0; i < num_xcc; i++)
1521 		gfx_v9_4_3_xcc_rlc_reset(adev, i);
1522 }
1523 
1524 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1525 {
1526 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1527 			      RLC_ENABLE_F32, 1);
1528 	udelay(50);
1529 
1530 	/* APUs (e.g. carrizo) enable the CP interrupt only after the CP is initialized */
1531 	if (!(adev->flags & AMD_IS_APU)) {
1532 		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1533 		udelay(50);
1534 	}
1535 }
1536 
1537 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1538 {
1539 #ifdef AMDGPU_RLC_DEBUG_RETRY
1540 	u32 rlc_ucode_ver;
1541 #endif
1542 	int i, num_xcc;
1543 
1544 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1545 	for (i = 0; i < num_xcc; i++) {
1546 		gfx_v9_4_3_xcc_rlc_start(adev, i);
1547 #ifdef AMDGPU_RLC_DEBUG_RETRY
1548 		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
1549 		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1550 		if (rlc_ucode_ver == 0x108) {
1551 			dev_info(adev->dev,
1552 				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
1553 				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1554 			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1555 			 * default is 0x9C4 to create a 100us interval */
1556 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1557 			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1558 			 * to disable the page fault retry interrupts, default is
1559 			 * 0x100 (256) */
1560 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1561 		}
1562 #endif
1563 	}
1564 }
1565 
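/* Legacy (non-PSP) RLC ucode load: stream the image dword by dword
 * through RLC_GPM_UCODE_DATA, then write the fw version to
 * RLC_GPM_UCODE_ADDR as the final step.
 */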
1566 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1567 					     int xcc_id)
1568 {
1569 	const struct rlc_firmware_header_v2_0 *hdr;
1570 	const __le32 *fw_data;
1571 	unsigned i, fw_size;
1572 
1573 	if (!adev->gfx.rlc_fw)
1574 		return -EINVAL;
1575 
1576 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1577 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1578 
1579 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1580 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1581 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1582 
1583 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1584 			RLCG_UCODE_LOADING_START_ADDRESS);
1585 	for (i = 0; i < fw_size; i++) {
1586 		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1587 			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
1588 			msleep(1);
1589 		}
1590 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1591 	}
1592 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1593 
1594 	return 0;
1595 }
1596 
1597 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1598 {
1599 	int r;
1600 
1601 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1602 		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1603 		/* legacy rlc firmware loading */
1604 		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1605 		if (r)
1606 			return r;
1607 		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1608 	}
1609 
1610 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1611 	/* disable CG */
1612 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1613 	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1614 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1615 
1616 	return 0;
1617 }
1618 
1619 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1620 {
1621 	int r, i, num_xcc;
1622 
1623 	if (amdgpu_sriov_vf(adev))
1624 		return 0;
1625 
1626 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1627 	for (i = 0; i < num_xcc; i++) {
1628 		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1629 		if (r)
1630 			return r;
1631 	}
1632 
1633 	return 0;
1634 }
1635 
1636 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1637 				       unsigned vmid)
1638 {
1639 	u32 reg, pre_data, data;
1640 
1641 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
1642 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
1643 		pre_data = RREG32_NO_KIQ(reg);
1644 	else
1645 		pre_data = RREG32(reg);
1646 
	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (pre_data != data) {
		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
		else
			WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
	}
1656 }
1657 
1658 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1659 	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1660 	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1661 };
1662 
1663 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1664 					uint32_t offset,
1665 					struct soc15_reg_rlcg *entries, int arr_size)
1666 {
1667 	int i, inst;
1668 	uint32_t reg;
1669 
1670 	if (!entries)
1671 		return false;
1672 
1673 	for (i = 0; i < arr_size; i++) {
1674 		const struct soc15_reg_rlcg *entry;
1675 
1676 		entry = &entries[i];
1677 		inst = adev->ip_map.logical_to_dev_inst ?
1678 			       adev->ip_map.logical_to_dev_inst(
1679 				       adev, entry->hwip, entry->instance) :
1680 			       entry->instance;
1681 		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1682 		      entry->reg;
1683 		if (offset == reg)
1684 			return true;
1685 	}
1686 
1687 	return false;
1688 }
1689 
1690 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1691 {
1692 	return gfx_v9_4_3_check_rlcg_range(adev, offset,
1693 					(void *)rlcg_access_gc_9_4_3,
1694 					ARRAY_SIZE(rlcg_access_gc_9_4_3));
1695 }
1696 
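/* Enable or halt the MEC. When halting, both ME1 and ME2 halt bits are
 * set and the KIQ ring is marked not ready so nothing new is scheduled
 * to it.
 */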
1697 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1698 					     bool enable, int xcc_id)
1699 {
1700 	if (enable) {
1701 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1702 	} else {
1703 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1704 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1705 		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1706 	}
1707 	udelay(50);
1708 }
1709 
1710 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1711 						    int xcc_id)
1712 {
1713 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1714 	const __le32 *fw_data;
1715 	unsigned i;
1716 	u32 tmp;
1717 	u32 mec_ucode_addr_offset;
1718 	u32 mec_ucode_data_offset;
1719 
1720 	if (!adev->gfx.mec_fw)
1721 		return -EINVAL;
1722 
1723 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1724 
1725 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1726 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1727 
1728 	fw_data = (const __le32 *)
1729 		(adev->gfx.mec_fw->data +
1730 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1731 	tmp = 0;
1732 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1733 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1734 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1735 
1736 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1737 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1738 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1739 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1740 
1741 	mec_ucode_addr_offset =
1742 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1743 	mec_ucode_data_offset =
1744 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1745 
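	/* Only the jump table is written through the UCODE_DATA port below;
	 * the ucode body itself is fetched from the IC base address
	 * programmed above.
	 */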
1746 	/* MEC1 */
1747 	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1748 	for (i = 0; i < mec_hdr->jt_size; i++)
1749 		WREG32(mec_ucode_data_offset,
1750 		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
1751 
1752 	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
	/* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
1754 
1755 	return 0;
1756 }
1757 
1758 /* KIQ functions */
1759 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1760 {
1761 	uint32_t tmp;
1762 	struct amdgpu_device *adev = ring->adev;
1763 
	/* tell the RLC which queue is the KIQ */
1765 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1766 	tmp &= 0xffffff00;
1767 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1768 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
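	/* bit 7 is then set with a second, separate write; this appears to
	 * mark the scheduler entry active
	 */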
1769 	tmp |= 0x80;
1770 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1771 }
1772 
1773 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1774 {
1775 	struct amdgpu_device *adev = ring->adev;
1776 
1777 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1778 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1779 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1780 			mqd->cp_hqd_queue_priority =
1781 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1782 		}
1783 	}
1784 }
1785 
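/* Build the MQD image in host memory. Most fields mirror the CP_HQD_*
 * registers and are seeded from the current hardware values.
 */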
1786 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1787 {
1788 	struct amdgpu_device *adev = ring->adev;
1789 	struct v9_mqd *mqd = ring->mqd_ptr;
1790 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1791 	uint32_t tmp;
1792 
1793 	mqd->header = 0xC0310800;
1794 	mqd->compute_pipelinestat_enable = 0x00000001;
1795 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1796 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1797 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1798 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1799 	mqd->compute_misc_reserved = 0x00000003;
1800 
1801 	mqd->dynamic_cu_mask_addr_lo =
1802 		lower_32_bits(ring->mqd_gpu_addr
1803 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1804 	mqd->dynamic_cu_mask_addr_hi =
1805 		upper_32_bits(ring->mqd_gpu_addr
1806 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1807 
1808 	eop_base_addr = ring->eop_gpu_addr >> 8;
1809 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1810 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1811 
1812 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1813 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1814 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1815 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
1816 
1817 	mqd->cp_hqd_eop_control = tmp;
1818 
1819 	/* enable doorbell? */
1820 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1821 
1822 	if (ring->use_doorbell) {
1823 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1824 				    DOORBELL_OFFSET, ring->doorbell_index);
1825 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1826 				    DOORBELL_EN, 1);
1827 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1828 				    DOORBELL_SOURCE, 0);
1829 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1830 				    DOORBELL_HIT, 0);
1831 		if (amdgpu_sriov_vf(adev))
1832 			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1833 					    DOORBELL_MODE, 1);
1834 	} else {
1835 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1836 					 DOORBELL_EN, 0);
1837 	}
1838 
1839 	mqd->cp_hqd_pq_doorbell_control = tmp;
1840 
1841 	/* disable the queue if it's active */
1842 	ring->wptr = 0;
1843 	mqd->cp_hqd_dequeue_request = 0;
1844 	mqd->cp_hqd_pq_rptr = 0;
1845 	mqd->cp_hqd_pq_wptr_lo = 0;
1846 	mqd->cp_hqd_pq_wptr_hi = 0;
1847 
1848 	/* set the pointer to the MQD */
1849 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1850 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1851 
1852 	/* set MQD vmid to 0 */
1853 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1854 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1855 	mqd->cp_mqd_control = tmp;
1856 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1858 	hqd_gpu_addr = ring->gpu_addr >> 8;
1859 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1860 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1861 
1862 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1863 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1864 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1865 			    (order_base_2(ring->ring_size / 4) - 1));
1866 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1867 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
1868 #ifdef __BIG_ENDIAN
1869 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1870 #endif
1871 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1872 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1873 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1874 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1875 	mqd->cp_hqd_pq_control = tmp;
1876 
1877 	/* set the wb address whether it's enabled or not */
1878 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1879 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1880 	mqd->cp_hqd_pq_rptr_report_addr_hi =
1881 		upper_32_bits(wb_gpu_addr) & 0xffff;
1882 
1883 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1884 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1885 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1886 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1887 
1888 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1889 	ring->wptr = 0;
1890 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1891 
1892 	/* set the vmid for the queue */
1893 	mqd->cp_hqd_vmid = 0;
1894 
1895 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1896 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1897 	mqd->cp_hqd_persistent_state = tmp;
1898 
1899 	/* set MIN_IB_AVAIL_SIZE */
1900 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1901 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1902 	mqd->cp_hqd_ib_control = tmp;
1903 
1904 	/* set static priority for a queue/ring */
1905 	gfx_v9_4_3_mqd_set_priority(ring, mqd);
1906 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1907 
	/* The map_queues packet takes care of activating compute queues,
	 * so only the KIQ needs cp_hqd_active set here.
	 */
1911 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1912 		mqd->cp_hqd_active = 1;
1913 
1914 	return 0;
1915 }
1916 
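/* Program the HQD registers directly from the MQD image. Callers are
 * expected to hold srbm_mutex with the queue selected via grbm_select.
 */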
1917 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1918 					    int xcc_id)
1919 {
1920 	struct amdgpu_device *adev = ring->adev;
1921 	struct v9_mqd *mqd = ring->mqd_ptr;
1922 	int j;
1923 
1924 	/* disable wptr polling */
1925 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1926 
1927 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1928 	       mqd->cp_hqd_eop_base_addr_lo);
1929 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1930 	       mqd->cp_hqd_eop_base_addr_hi);
1931 
1932 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1933 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1934 	       mqd->cp_hqd_eop_control);
1935 
1936 	/* enable doorbell? */
1937 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1938 	       mqd->cp_hqd_pq_doorbell_control);
1939 
1940 	/* disable the queue if it's active */
1941 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1942 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1943 		for (j = 0; j < adev->usec_timeout; j++) {
1944 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1945 				break;
1946 			udelay(1);
1947 		}
1948 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1949 		       mqd->cp_hqd_dequeue_request);
1950 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1951 		       mqd->cp_hqd_pq_rptr);
1952 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1953 		       mqd->cp_hqd_pq_wptr_lo);
1954 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1955 		       mqd->cp_hqd_pq_wptr_hi);
1956 	}
1957 
1958 	/* set the pointer to the MQD */
1959 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1960 	       mqd->cp_mqd_base_addr_lo);
1961 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1962 	       mqd->cp_mqd_base_addr_hi);
1963 
1964 	/* set MQD vmid to 0 */
1965 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1966 	       mqd->cp_mqd_control);
1967 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1969 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1970 	       mqd->cp_hqd_pq_base_lo);
1971 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1972 	       mqd->cp_hqd_pq_base_hi);
1973 
1974 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1975 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1976 	       mqd->cp_hqd_pq_control);
1977 
1978 	/* set the wb address whether it's enabled or not */
1979 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1980 				mqd->cp_hqd_pq_rptr_report_addr_lo);
1981 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1982 				mqd->cp_hqd_pq_rptr_report_addr_hi);
1983 
1984 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1985 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
1986 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
1987 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1988 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
1989 
1990 	/* enable the doorbell if requested */
1991 	if (ring->use_doorbell) {
1992 		WREG32_SOC15(
1993 			GC, GET_INST(GC, xcc_id),
1994 			regCP_MEC_DOORBELL_RANGE_LOWER,
1995 			((adev->doorbell_index.kiq +
1996 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1997 			 2) << 2);
1998 		WREG32_SOC15(
1999 			GC, GET_INST(GC, xcc_id),
2000 			regCP_MEC_DOORBELL_RANGE_UPPER,
2001 			((adev->doorbell_index.userqueue_end +
2002 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2003 			 2) << 2);
2004 	}
2005 
2006 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2007 	       mqd->cp_hqd_pq_doorbell_control);
2008 
2009 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2010 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2011 	       mqd->cp_hqd_pq_wptr_lo);
2012 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2013 	       mqd->cp_hqd_pq_wptr_hi);
2014 
2015 	/* set the vmid for the queue */
2016 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2017 
2018 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2019 	       mqd->cp_hqd_persistent_state);
2020 
2021 	/* activate the queue */
2022 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2023 	       mqd->cp_hqd_active);
2024 
2025 	if (ring->use_doorbell)
2026 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2027 
2028 	return 0;
2029 }
2030 
2031 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
2032 					    int xcc_id)
2033 {
2034 	struct amdgpu_device *adev = ring->adev;
2035 	int j;
2036 
2037 	/* disable the queue if it's active */
2038 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2039 
2040 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2041 
2042 		for (j = 0; j < adev->usec_timeout; j++) {
2043 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2044 				break;
2045 			udelay(1);
2046 		}
2047 
		if (j == adev->usec_timeout) {
			DRM_DEBUG("%s dequeue request failed.\n", ring->name);

			/* Manually disable the queue if the dequeue request timed out */
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2053 		}
2054 
2055 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2056 		      0);
2057 	}
2058 
2059 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
2060 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
2061 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
2062 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2063 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
2064 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
2065 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
2066 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
2067 
2068 	return 0;
2069 }
2070 
2071 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
2072 {
2073 	struct amdgpu_device *adev = ring->adev;
2074 	struct v9_mqd *mqd = ring->mqd_ptr;
2075 	struct v9_mqd *tmp_mqd;
2076 
2077 	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
2078 
	/* The GPU could be in a bad state during probe: if the driver
	 * triggers a reset after loading the SMU, the MQD may never have
	 * been initialized and must be re-initialized here. Check
	 * mqd->cp_hqd_pq_control, since that value should never be 0.
	 */
2084 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
2085 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
		/* for the GPU_RESET case, restore the MQD to a clean state */
2087 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2088 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
2089 
2090 		/* reset ring buffer */
2091 		ring->wptr = 0;
2092 		amdgpu_ring_clear_ring(ring);
2093 		mutex_lock(&adev->srbm_mutex);
2094 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2095 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2096 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2097 		mutex_unlock(&adev->srbm_mutex);
2098 	} else {
2099 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2100 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2101 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2102 		mutex_lock(&adev->srbm_mutex);
2103 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2104 			amdgpu_ring_clear_ring(ring);
2105 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2106 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2107 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2108 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2109 		mutex_unlock(&adev->srbm_mutex);
2110 
2111 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2112 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
2113 	}
2114 
2115 	return 0;
2116 }
2117 
2118 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
2119 {
2120 	struct amdgpu_device *adev = ring->adev;
2121 	struct v9_mqd *mqd = ring->mqd_ptr;
2122 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2123 	struct v9_mqd *tmp_mqd;
2124 
	/* As with the KIQ init above, the driver must re-init the MQD if
	 * mqd->cp_hqd_pq_control was never initialized.
	 */
2128 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
2129 
2130 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
2131 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
2132 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2133 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2134 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2135 		mutex_lock(&adev->srbm_mutex);
2136 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2137 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2138 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2139 		mutex_unlock(&adev->srbm_mutex);
2140 
2141 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2142 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2143 	} else {
2144 		/* restore MQD to a clean status */
2145 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2146 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2147 		/* reset ring buffer */
2148 		ring->wptr = 0;
2149 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
2150 		amdgpu_ring_clear_ring(ring);
2151 	}
2152 
2153 	return 0;
2154 }
2155 
2156 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
2157 {
2158 	struct amdgpu_ring *ring;
2159 	int j;
2160 
2161 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2162 		ring = &adev->gfx.compute_ring[j +  xcc_id * adev->gfx.num_compute_rings];
2163 		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2164 			mutex_lock(&adev->srbm_mutex);
2165 			soc15_grbm_select(adev, ring->me,
2166 					ring->pipe,
2167 					ring->queue, 0, GET_INST(GC, xcc_id));
2168 			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
2169 			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2170 			mutex_unlock(&adev->srbm_mutex);
2171 		}
2172 	}
2173 
2174 	return 0;
2175 }
2176 
2177 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
2178 {
2179 	struct amdgpu_ring *ring;
2180 	int r;
2181 
2182 	ring = &adev->gfx.kiq[xcc_id].ring;
2183 
2184 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2185 	if (unlikely(r != 0))
2186 		return r;
2187 
2188 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2189 	if (unlikely(r != 0)) {
2190 		amdgpu_bo_unreserve(ring->mqd_obj);
2191 		return r;
2192 	}
2193 
2194 	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
2195 	amdgpu_bo_kunmap(ring->mqd_obj);
2196 	ring->mqd_ptr = NULL;
2197 	amdgpu_bo_unreserve(ring->mqd_obj);
2198 	return 0;
2199 }
2200 
2201 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
2202 {
2203 	struct amdgpu_ring *ring = NULL;
2204 	int r = 0, i;
2205 
2206 	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
2207 
2208 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2209 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2210 
2211 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2212 		if (unlikely(r != 0))
2213 			goto done;
2214 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2215 		if (!r) {
2216 			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
2217 			amdgpu_bo_kunmap(ring->mqd_obj);
2218 			ring->mqd_ptr = NULL;
2219 		}
2220 		amdgpu_bo_unreserve(ring->mqd_obj);
2221 		if (r)
2222 			goto done;
2223 	}
2224 
2225 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2226 done:
2227 	return r;
2228 }
2229 
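/* Bring up the compute pipeline on one XCC: load the MEC ucode when the
 * legacy (non-PSP) path is used, resume the KIQ, then the KCQs, and
 * finally run a ring test on every compute ring.
 */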
2230 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
2231 {
2232 	struct amdgpu_ring *ring;
2233 	int r, j;
2234 
2235 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2236 
2237 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2238 		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
2239 
2240 		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
2241 		if (r)
2242 			return r;
2243 	}
2244 
2245 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
2246 	if (r)
2247 		return r;
2248 
2249 	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
2250 	if (r)
2251 		return r;
2252 
2253 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2254 		ring = &adev->gfx.compute_ring
2255 				[j + xcc_id * adev->gfx.num_compute_rings];
2256 		r = amdgpu_ring_test_helper(ring);
2257 		if (r)
2258 			return r;
2259 	}
2260 
2261 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2262 
2263 	return 0;
2264 }
2265 
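/* Resume CP on all XCCs. For SRIOV VFs the partition mode is queried
 * from the host and used to initialize the XCPs; on bare metal an
 * unknown mode is replaced by the user-requested partition mode.
 */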
2266 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2267 {
2268 	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
2269 
2270 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2271 	if (amdgpu_sriov_vf(adev)) {
2272 		enum amdgpu_gfx_partition mode;
2273 
2274 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2275 						       AMDGPU_XCP_FL_NONE);
2276 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2277 			return -EINVAL;
2278 		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
2279 		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2280 		num_xcp = num_xcc / num_xcc_per_xcp;
2281 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2282 
2283 	} else {
2284 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2285 						    AMDGPU_XCP_FL_NONE) ==
2286 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2287 			r = amdgpu_xcp_switch_partition_mode(
2288 				adev->xcp_mgr, amdgpu_user_partt_mode);
2289 	}
2290 	if (r)
2291 		return r;
2292 
2293 	for (i = 0; i < num_xcc; i++) {
2294 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2295 		if (r)
2296 			return r;
2297 	}
2298 
2299 	return 0;
2300 }
2301 
2302 static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
2303 				     int xcc_id)
2304 {
2305 	gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
2306 }
2307 
2308 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2309 {
2310 	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2311 		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2312 
2313 	if (amdgpu_sriov_vf(adev)) {
		/* Polling must be disabled for SRIOV once the hw is finished;
		 * otherwise the CPC engine may keep fetching the WB address,
		 * which is already invalid after sw teardown, and trigger a
		 * DMAR read error on the hypervisor side.
		 */
2319 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2320 		return;
2321 	}
2322 
	/* Use the deinitialization sequence from CAIL when unbinding the
	 * device from the driver; otherwise the KIQ hangs when binding back.
	 */
2326 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2327 		mutex_lock(&adev->srbm_mutex);
2328 		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2329 				  adev->gfx.kiq[xcc_id].ring.pipe,
2330 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
2331 				  GET_INST(GC, xcc_id));
2332 		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2333 						 xcc_id);
2334 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2335 		mutex_unlock(&adev->srbm_mutex);
2336 	}
2337 
2338 	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2339 	gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
2340 }
2341 
2342 static int gfx_v9_4_3_hw_init(void *handle)
2343 {
2344 	int r;
2345 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2346 
2347 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
2348 				       adev->gfx.cleaner_shader_ptr);
2349 
2350 	if (!amdgpu_sriov_vf(adev))
2351 		gfx_v9_4_3_init_golden_registers(adev);
2352 
2353 	gfx_v9_4_3_constants_init(adev);
2354 
2355 	r = adev->gfx.rlc.funcs->resume(adev);
2356 	if (r)
2357 		return r;
2358 
2359 	r = gfx_v9_4_3_cp_resume(adev);
2360 	if (r)
2361 		return r;
2362 
2363 	return r;
2364 }
2365 
2366 static int gfx_v9_4_3_hw_fini(void *handle)
2367 {
2368 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2369 	int i, num_xcc;
2370 
2371 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2372 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2373 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
2374 
2375 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_fini(adev, i);
2379 
2380 	return 0;
2381 }
2382 
2383 static int gfx_v9_4_3_suspend(void *handle)
2384 {
2385 	return gfx_v9_4_3_hw_fini(handle);
2386 }
2387 
2388 static int gfx_v9_4_3_resume(void *handle)
2389 {
2390 	return gfx_v9_4_3_hw_init(handle);
2391 }
2392 
2393 static bool gfx_v9_4_3_is_idle(void *handle)
2394 {
2395 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2396 	int i, num_xcc;
2397 
2398 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2399 	for (i = 0; i < num_xcc; i++) {
2400 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2401 					GRBM_STATUS, GUI_ACTIVE))
2402 			return false;
2403 	}
2404 	return true;
2405 }
2406 
2407 static int gfx_v9_4_3_wait_for_idle(void *handle)
2408 {
2409 	unsigned i;
2410 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2411 
2412 	for (i = 0; i < adev->usec_timeout; i++) {
2413 		if (gfx_v9_4_3_is_idle(handle))
2414 			return 0;
2415 		udelay(1);
2416 	}
2417 	return -ETIMEDOUT;
2418 }
2419 
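/* Inspect the GRBM status registers and soft-reset whichever of the
 * CP/GFX/RLC blocks report busy. Note that only XCC0 is checked and
 * reset here.
 */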
2420 static int gfx_v9_4_3_soft_reset(void *handle)
2421 {
2422 	u32 grbm_soft_reset = 0;
2423 	u32 tmp;
2424 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2425 
2426 	/* GRBM_STATUS */
2427 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2428 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2429 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2430 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2431 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2432 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2433 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2434 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2435 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2436 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2437 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2438 	}
2439 
2440 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2441 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2442 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2443 	}
2444 
2445 	/* GRBM_STATUS2 */
2446 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2447 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2448 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2449 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2452 	if (grbm_soft_reset) {
2453 		/* stop the rlc */
2454 		adev->gfx.rlc.funcs->stop(adev);
2455 
2456 		/* Disable MEC parsing/prefetching */
2457 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2458 
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2472 
2473 		/* Wait a little for things to settle down */
2474 		udelay(50);
2475 	}
2476 	return 0;
2477 }
2478 
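/* Emit the register writes that switch the GDS, GWS and OA allocations
 * for the given vmid. The OA mask is a run of oa_size bits starting at
 * bit oa_base.
 */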
2479 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2480 					  uint32_t vmid,
2481 					  uint32_t gds_base, uint32_t gds_size,
2482 					  uint32_t gws_base, uint32_t gws_size,
2483 					  uint32_t oa_base, uint32_t oa_size)
2484 {
2485 	struct amdgpu_device *adev = ring->adev;
2486 
2487 	/* GDS Base */
2488 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2489 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2490 				   gds_base);
2491 
2492 	/* GDS Size */
2493 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2494 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2495 				   gds_size);
2496 
2497 	/* GWS */
2498 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2499 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2500 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2501 
2502 	/* OA */
2503 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2504 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2505 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
2506 }
2507 
2508 static int gfx_v9_4_3_early_init(void *handle)
2509 {
2510 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2511 
2512 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2513 					  AMDGPU_MAX_COMPUTE_RINGS);
2514 	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2515 	gfx_v9_4_3_set_ring_funcs(adev);
2516 	gfx_v9_4_3_set_irq_funcs(adev);
2517 	gfx_v9_4_3_set_gds_init(adev);
2518 	gfx_v9_4_3_set_rlc_funcs(adev);
2519 
2520 	/* init rlcg reg access ctrl */
2521 	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2522 
2523 	return gfx_v9_4_3_init_microcode(adev);
2524 }
2525 
2526 static int gfx_v9_4_3_late_init(void *handle)
2527 {
2528 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2529 	int r;
2530 
2531 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2532 	if (r)
2533 		return r;
2534 
2535 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2536 	if (r)
2537 		return r;
2538 
2539 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
2540 	if (r)
2541 		return r;
2542 
2543 	if (adev->gfx.ras &&
2544 	    adev->gfx.ras->enable_watchdog_timer)
2545 		adev->gfx.ras->enable_watchdog_timer(adev);
2546 
2547 	return 0;
2548 }
2549 
2550 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2551 					    bool enable, int xcc_id)
2552 {
2553 	uint32_t def, data;
2554 
2555 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2556 		return;
2557 
2558 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2559 				  regRLC_CGTT_MGCG_OVERRIDE);
2560 
2561 	if (enable)
2562 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2563 	else
2564 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2565 
2566 	if (def != data)
2567 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2568 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2569 
2570 }
2571 
2572 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2573 						bool enable, int xcc_id)
2574 {
2575 	uint32_t def, data;
2576 
2577 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2578 		return;
2579 
2580 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2581 				  regRLC_CGTT_MGCG_OVERRIDE);
2582 
2583 	if (enable)
2584 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2585 	else
2586 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2587 
2588 	if (def != data)
2589 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2590 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2591 }
2592 
2593 static void
2594 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2595 						bool enable, int xcc_id)
2596 {
2597 	uint32_t data, def;
2598 
2599 	/* It is disabled by HW by default */
2600 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2601 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2602 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2603 
2604 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2605 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2606 			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2607 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2608 
2609 		if (def != data)
2610 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2611 
2612 		/* MGLS is a global flag to control all MGLS in GFX */
2613 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2614 			/* 2 - RLC memory Light sleep */
2615 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2616 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2617 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2618 				if (def != data)
2619 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2620 			}
2621 			/* 3 - CP memory Light sleep */
2622 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2623 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2624 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2625 				if (def != data)
2626 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2627 			}
2628 		}
2629 	} else {
2630 		/* 1 - MGCG_OVERRIDE */
2631 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2632 
2633 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2634 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2635 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2636 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2637 
2638 		if (def != data)
2639 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2640 
2641 		/* 2 - disable MGLS in RLC */
2642 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2643 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2644 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2645 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2646 		}
2647 
2648 		/* 3 - disable MGLS in CP */
2649 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2650 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2651 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2652 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2653 		}
2654 	}
2655 
2656 }
2657 
2658 static void
2659 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2660 						bool enable, int xcc_id)
2661 {
2662 	uint32_t def, data;
2663 
2664 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2665 
2666 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2667 		/* unset CGCG override */
2668 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2669 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2670 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2671 		else
2672 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2673 		/* update CGCG and CGLS override bits */
2674 		if (def != data)
2675 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2676 
2677 		/* CGCG Hysteresis: 400us */
2678 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2679 
2680 		data = (0x2710
2681 			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2682 		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2683 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2684 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2685 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2686 		if (def != data)
2687 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2688 
		/* set IDLE_POLL_COUNT (0x33450100) */
2690 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2691 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2692 			(0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2693 		if (def != data)
2694 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2695 	} else {
2696 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2697 		/* reset CGCG/CGLS bits */
2698 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2699 		/* disable cgcg and cgls in FSM */
2700 		if (def != data)
2701 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2702 	}
2703 
2704 }
2705 
2706 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2707 						  bool enable, int xcc_id)
2708 {
2709 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2710 
2711 	if (enable) {
2712 		/* FGCG */
2713 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2714 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2715 
2716 		/* CGCG/CGLS should be enabled after MGCG/MGLS
2717 		 * ===  MGCG + MGLS ===
2718 		 */
2719 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2720 								xcc_id);
2721 		/* ===  CGCG + CGLS === */
2722 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2723 								xcc_id);
2724 	} else {
2725 		/* CGCG/CGLS should be disabled before MGCG/MGLS
2726 		 * ===  CGCG + CGLS ===
2727 		 */
2728 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2729 								xcc_id);
2730 		/* ===  MGCG + MGLS === */
2731 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2732 								xcc_id);
2733 
2734 		/* FGCG */
2735 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2736 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2737 	}
2738 
2739 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2740 
2741 	return 0;
2742 }
2743 
2744 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2745 	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2746 	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2747 	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2748 	.init = gfx_v9_4_3_rlc_init,
2749 	.resume = gfx_v9_4_3_rlc_resume,
2750 	.stop = gfx_v9_4_3_rlc_stop,
2751 	.reset = gfx_v9_4_3_rlc_reset,
2752 	.start = gfx_v9_4_3_rlc_start,
2753 	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2754 	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2755 };
2756 
2757 static int gfx_v9_4_3_set_powergating_state(void *handle,
2758 					  enum amd_powergating_state state)
2759 {
2760 	return 0;
2761 }
2762 
2763 static int gfx_v9_4_3_set_clockgating_state(void *handle,
2764 					  enum amd_clockgating_state state)
2765 {
2766 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2767 	int i, num_xcc;
2768 
2769 	if (amdgpu_sriov_vf(adev))
2770 		return 0;
2771 
2772 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2773 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2774 	case IP_VERSION(9, 4, 3):
2775 	case IP_VERSION(9, 4, 4):
2776 		for (i = 0; i < num_xcc; i++)
2777 			gfx_v9_4_3_xcc_update_gfx_clock_gating(
2778 				adev, state == AMD_CG_STATE_GATE, i);
2779 		break;
2780 	default:
2781 		break;
2782 	}
2783 	return 0;
2784 }
2785 
2786 static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
2787 {
2788 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2789 	int data;
2790 
2791 	if (amdgpu_sriov_vf(adev))
2792 		*flags = 0;
2793 
2794 	/* AMD_CG_SUPPORT_GFX_MGCG */
2795 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2796 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2797 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
2798 
2799 	/* AMD_CG_SUPPORT_GFX_CGCG */
2800 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2801 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2802 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
2803 
2804 	/* AMD_CG_SUPPORT_GFX_CGLS */
2805 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2806 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
2807 
2808 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
2809 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2810 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2811 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2812 
2813 	/* AMD_CG_SUPPORT_GFX_CP_LS */
2814 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2815 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2816 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2817 }
2818 
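/* Emit an HDP flush: write the ref value to the NBIO HDP flush request
 * register and wait until the done register reflects it, using
 * pipe-specific ref/mask bits.
 */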
2819 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2820 {
2821 	struct amdgpu_device *adev = ring->adev;
2822 	u32 ref_and_mask, reg_mem_engine;
2823 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2824 
2825 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2826 		switch (ring->me) {
2827 		case 1:
2828 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2829 			break;
2830 		case 2:
2831 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2832 			break;
2833 		default:
2834 			return;
2835 		}
2836 		reg_mem_engine = 0;
2837 	} else {
2838 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2839 		reg_mem_engine = 1; /* pfp */
2840 	}
2841 
2842 	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2843 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2844 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2845 			      ref_and_mask, ref_and_mask, 0x20);
2846 }
2847 
2848 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2849 					  struct amdgpu_job *job,
2850 					  struct amdgpu_ib *ib,
2851 					  uint32_t flags)
2852 {
2853 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2854 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2855 
	/* Currently there is a high likelihood of a wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
2859 	 * randomly when at least 5 compute pipes use GDS ordered append.
2860 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2861 	 * Those are probably bugs somewhere else in the kernel driver.
2862 	 *
2863 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2864 	 * GDS to 0 for this ring (me/pipe).
2865 	 */
2866 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2867 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2868 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2869 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2870 	}
2871 
2872 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2873 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2874 	amdgpu_ring_write(ring,
2875 #ifdef __BIG_ENDIAN
2876 				(2 << 0) |
2877 #endif
2878 				lower_32_bits(ib->gpu_addr));
2879 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2880 	amdgpu_ring_write(ring, control);
2881 }
2882 
2883 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
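/* Emit a fence via RELEASE_MEM: flush/invalidate the TC caches (or
 * write back only, for TC_WB_ONLY fences), write the seq value to
 * "addr" and optionally raise an interrupt.
 */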
2884 				     u64 seq, unsigned flags)
2885 {
2886 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2887 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2888 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2889 
2890 	/* RELEASE_MEM - flush caches, send int */
2891 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2892 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2893 					       EOP_TC_NC_ACTION_EN) :
2894 					      (EOP_TCL1_ACTION_EN |
2895 					       EOP_TC_ACTION_EN |
2896 					       EOP_TC_WB_ACTION_EN |
2897 					       EOP_TC_MD_ACTION_EN)) |
2898 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2899 				 EVENT_INDEX(5)));
2900 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2901 
	/*
	 * The address must be Qword-aligned for a 64-bit write and
	 * Dword-aligned when only the low 32 bits of data are sent
	 * (data high is discarded).
	 */
2906 	if (write64bit)
2907 		BUG_ON(addr & 0x7);
2908 	else
2909 		BUG_ON(addr & 0x3);
2910 	amdgpu_ring_write(ring, lower_32_bits(addr));
2911 	amdgpu_ring_write(ring, upper_32_bits(addr));
2912 	amdgpu_ring_write(ring, lower_32_bits(seq));
2913 	amdgpu_ring_write(ring, upper_32_bits(seq));
2914 	amdgpu_ring_write(ring, 0);
2915 }
2916 
2917 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2918 {
2919 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2920 	uint32_t seq = ring->fence_drv.sync_seq;
2921 	uint64_t addr = ring->fence_drv.gpu_addr;
2922 
2923 	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2924 			      lower_32_bits(addr), upper_32_bits(addr),
2925 			      seq, 0xffffffff, 4);
2926 }
2927 
2928 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2929 					unsigned vmid, uint64_t pd_addr)
2930 {
2931 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2932 }
2933 
2934 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2935 {
2936 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2937 }
2938 
2939 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2940 {
2941 	u64 wptr;
2942 
2943 	/* XXX check if swapping is necessary on BE */
2944 	if (ring->use_doorbell)
2945 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2946 	else
2947 		BUG();
2948 	return wptr;
2949 }
2950 
2951 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2952 {
2953 	struct amdgpu_device *adev = ring->adev;
2954 
2955 	/* XXX check if swapping is necessary on BE */
2956 	if (ring->use_doorbell) {
2957 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2958 		WDOORBELL64(ring->doorbell_index, ring->wptr);
2959 	} else {
2960 		BUG(); /* only DOORBELL method supported on gfx9 now */
2961 	}
2962 }
2963 
2964 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2965 					 u64 seq, unsigned int flags)
2966 {
2967 	struct amdgpu_device *adev = ring->adev;
2968 
2969 	/* we only allocate 32bit for each seq wb address */
2970 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2971 
2972 	/* write fence seq to the "addr" */
2973 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2974 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2975 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2976 	amdgpu_ring_write(ring, lower_32_bits(addr));
2977 	amdgpu_ring_write(ring, upper_32_bits(addr));
2978 	amdgpu_ring_write(ring, lower_32_bits(seq));
2979 
2980 	if (flags & AMDGPU_FENCE_FLAG_INT) {
2981 		/* set register to trigger INT */
2982 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2983 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2984 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2985 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2986 		amdgpu_ring_write(ring, 0);
2987 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2988 	}
2989 }
2990 
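/* Read a register through the CP: COPY_DATA moves the register value
 * into the wb slot at reg_val_offs, with write confirmation.
 */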
2991 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2992 				    uint32_t reg_val_offs)
2993 {
2994 	struct amdgpu_device *adev = ring->adev;
2995 
2996 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
2997 
2998 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2999 	amdgpu_ring_write(ring, 0 |	/* src: register*/
3000 				(5 << 8) |	/* dst: memory */
3001 				(1 << 20));	/* write confirm */
3002 	amdgpu_ring_write(ring, reg);
3003 	amdgpu_ring_write(ring, 0);
3004 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3005 				reg_val_offs * 4));
3006 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3007 				reg_val_offs * 4));
3008 }
3009 
3010 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
3011 				    uint32_t val)
3012 {
3013 	uint32_t cmd = 0;
3014 
3015 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
3016 
3017 	switch (ring->funcs->type) {
3018 	case AMDGPU_RING_TYPE_GFX:
3019 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
3020 		break;
3021 	case AMDGPU_RING_TYPE_KIQ:
3022 		cmd = (1 << 16); /* no inc addr */
3023 		break;
3024 	default:
3025 		cmd = WR_CONFIRM;
3026 		break;
3027 	}
3028 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3029 	amdgpu_ring_write(ring, cmd);
3030 	amdgpu_ring_write(ring, reg);
3031 	amdgpu_ring_write(ring, 0);
3032 	amdgpu_ring_write(ring, val);
3033 }
3034 
3035 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3036 					uint32_t val, uint32_t mask)
3037 {
3038 	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3039 }
3040 
3041 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3042 						  uint32_t reg0, uint32_t reg1,
3043 						  uint32_t ref, uint32_t mask)
3044 {
3045 	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
3046 						   ref, mask);
3047 }
3048 
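/* Attempt soft recovery of a hung ring by issuing an SQ_CMD against the
 * waves belonging to the given vmid, bracketed by RLC safe mode.
 */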
3049 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3050 					  unsigned vmid)
3051 {
3052 	struct amdgpu_device *adev = ring->adev;
3053 	uint32_t value = 0;
3054 
3055 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3056 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3057 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3058 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3059 	amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3060 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3061 	amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3062 }
3063 
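/* Route the EOP time-stamp interrupt enable to the matching
 * CP_ME1_PIPEn_INT_CNTL register of the given XCC.
 */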
3064 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3065 	struct amdgpu_device *adev, int me, int pipe,
3066 	enum amdgpu_interrupt_state state, int xcc_id)
3067 {
3068 	u32 mec_int_cntl, mec_int_cntl_reg;
3069 
3070 	/*
3071 	 * amdgpu controls only the first MEC. That's why this function only
3072 	 * handles the setting of interrupts for this specific MEC. All other
3073 	 * pipes' interrupts are set by amdkfd.
3074 	 */
3075 
3076 	if (me == 1) {
3077 		switch (pipe) {
3078 		case 0:
3079 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3080 			break;
3081 		case 1:
3082 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3083 			break;
3084 		case 2:
3085 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3086 			break;
3087 		case 3:
3088 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3089 			break;
3090 		default:
3091 			DRM_DEBUG("invalid pipe %d\n", pipe);
3092 			return;
3093 		}
3094 	} else {
3095 		DRM_DEBUG("invalid me %d\n", me);
3096 		return;
3097 	}
3098 
3099 	switch (state) {
3100 	case AMDGPU_IRQ_STATE_DISABLE:
3101 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3102 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3103 					     TIME_STAMP_INT_ENABLE, 0);
3104 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3105 		break;
3106 	case AMDGPU_IRQ_STATE_ENABLE:
3107 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3108 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3109 					     TIME_STAMP_INT_ENABLE, 1);
3110 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3111 		break;
3112 	default:
3113 		break;
3114 	}
3115 }
3116 
3117 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3118 				     int xcc_id, int me, int pipe)
3119 {
3120 	/*
3121 	 * amdgpu controls only the first MEC. That's why this function only
3122 	 * handles the setting of interrupts for this specific MEC. All other
3123 	 * pipes' interrupts are set by amdkfd.
3124 	 */
3125 	if (me != 1)
3126 		return 0;
3127 
3128 	switch (pipe) {
3129 	case 0:
3130 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3131 	case 1:
3132 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3133 	case 2:
3134 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3135 	case 3:
3136 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3137 	default:
3138 		return 0;
3139 	}
3140 }
3141 
3142 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3143 					     struct amdgpu_irq_src *source,
3144 					     unsigned type,
3145 					     enum amdgpu_interrupt_state state)
3146 {
3147 	u32 mec_int_cntl_reg, mec_int_cntl;
3148 	int i, j, k, num_xcc;
3149 
3150 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3151 	switch (state) {
3152 	case AMDGPU_IRQ_STATE_DISABLE:
3153 	case AMDGPU_IRQ_STATE_ENABLE:
3154 		for (i = 0; i < num_xcc; i++) {
3155 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3156 					      PRIV_REG_INT_ENABLE,
3157 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3158 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3159 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3160 					/* MECs start at 1 */
3161 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3162 
3163 					if (mec_int_cntl_reg) {
3164 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3165 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3166 									     PRIV_REG_INT_ENABLE,
3167 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3168 									     1 : 0);
3169 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3170 					}
3171 				}
3172 			}
3173 		}
3174 		break;
3175 	default:
3176 		break;
3177 	}
3178 
3179 	return 0;
3180 }
3181 
3182 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3183 					     struct amdgpu_irq_src *source,
3184 					     unsigned type,
3185 					     enum amdgpu_interrupt_state state)
3186 {
3187 	u32 mec_int_cntl_reg, mec_int_cntl;
3188 	int i, j, k, num_xcc;
3189 
3190 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3191 	switch (state) {
3192 	case AMDGPU_IRQ_STATE_DISABLE:
3193 	case AMDGPU_IRQ_STATE_ENABLE:
3194 		for (i = 0; i < num_xcc; i++) {
3195 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3196 					      OPCODE_ERROR_INT_ENABLE,
3197 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3198 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3199 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3200 					/* MECs start at 1 */
3201 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3202 
3203 					if (mec_int_cntl_reg) {
3204 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3205 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3206 									     OPCODE_ERROR_INT_ENABLE,
3207 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3208 									     1 : 0);
3209 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3210 					}
3211 				}
3212 			}
3213 		}
3214 		break;
3215 	default:
3216 		break;
3217 	}
3218 
3219 	return 0;
3220 }
3221 
3222 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3223 					      struct amdgpu_irq_src *source,
3224 					      unsigned type,
3225 					      enum amdgpu_interrupt_state state)
3226 {
3227 	int i, num_xcc;
3228 
3229 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3230 	switch (state) {
3231 	case AMDGPU_IRQ_STATE_DISABLE:
3232 	case AMDGPU_IRQ_STATE_ENABLE:
3233 		for (i = 0; i < num_xcc; i++)
3234 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3235 				PRIV_INSTR_INT_ENABLE,
3236 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3237 		break;
3238 	default:
3239 		break;
3240 	}
3241 
3242 	return 0;
3243 }
3244 
3245 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3246 					    struct amdgpu_irq_src *src,
3247 					    unsigned type,
3248 					    enum amdgpu_interrupt_state state)
3249 {
3250 	int i, num_xcc;
3251 
3252 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3253 	for (i = 0; i < num_xcc; i++) {
3254 		switch (type) {
3255 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3256 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3257 				adev, 1, 0, state, i);
3258 			break;
3259 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3260 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3261 				adev, 1, 1, state, i);
3262 			break;
3263 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3264 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3265 				adev, 1, 2, state, i);
3266 			break;
3267 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3268 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3269 				adev, 1, 3, state, i);
3270 			break;
3271 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3272 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3273 				adev, 2, 0, state, i);
3274 			break;
3275 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3276 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3277 				adev, 2, 1, state, i);
3278 			break;
3279 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3280 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3281 				adev, 2, 2, state, i);
3282 			break;
3283 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3284 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3285 				adev, 2, 3, state, i);
3286 			break;
3287 		default:
3288 			break;
3289 		}
3290 	}
3291 
3292 	return 0;
3293 }
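
/*
 * Illustrative sketch, not part of the driver: the eight compute EOP
 * sources handled above are consecutive enum values, so, assuming that
 * layout, the switch reduces to the hypothetical decode below (shown
 * for clarity only):
 */
static inline void __maybe_unused gfx_v9_4_3_eop_type_to_me_pipe(unsigned int type,
								 int *me, int *pipe)
{
	unsigned int idx = type - AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP;

	*me = 1 + idx / 4;	/* MEC1 -> me 1, MEC2 -> me 2 */
	*pipe = idx % 4;	/* four pipes per MEC */
}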
3294 
3295 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
3296 			    struct amdgpu_irq_src *source,
3297 			    struct amdgpu_iv_entry *entry)
3298 {
3299 	int i, xcc_id;
3300 	u8 me_id, pipe_id, queue_id;
3301 	struct amdgpu_ring *ring;
3302 
3303 	DRM_DEBUG("IH: CP EOP\n");
3304 	me_id = (entry->ring_id & 0x0c) >> 2;
3305 	pipe_id = (entry->ring_id & 0x03) >> 0;
3306 	queue_id = (entry->ring_id & 0x70) >> 4;
3307 
3308 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3309 
3310 	if (xcc_id == -EINVAL)
3311 		return -EINVAL;
3312 
3313 	switch (me_id) {
3314 	case 0:
3315 	case 1:
3316 	case 2:
3317 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3318 			ring = &adev->gfx.compute_ring
3319 					[i +
3320 					 xcc_id * adev->gfx.num_compute_rings];
3321 			/* Per-queue interrupt is supported for MEC starting from VI.
3322 			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
3323 			 */
3324 
3325 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3326 				amdgpu_fence_process(ring);
3327 		}
3328 		break;
3329 	}
3330 	return 0;
3331 }
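
/*
 * Illustrative sketch, not part of the driver: the ring_id decode used
 * by the IRQ handlers above and below treats the low byte as
 * [6:4] = queue, [3:2] = me, [1:0] = pipe.  A hypothetical helper form:
 */
static inline void __maybe_unused gfx_v9_4_3_decode_ring_id(u8 ring_id, u8 *me,
							    u8 *pipe, u8 *queue)
{
	*me = (ring_id & 0x0c) >> 2;
	*pipe = ring_id & 0x03;
	*queue = (ring_id & 0x70) >> 4;
}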
3332 
3333 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3334 			   struct amdgpu_iv_entry *entry)
3335 {
3336 	u8 me_id, pipe_id, queue_id;
3337 	struct amdgpu_ring *ring;
3338 	int i, xcc_id;
3339 
3340 	me_id = (entry->ring_id & 0x0c) >> 2;
3341 	pipe_id = (entry->ring_id & 0x03) >> 0;
3342 	queue_id = (entry->ring_id & 0x70) >> 4;
3343 
3344 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3345 
3346 	if (xcc_id == -EINVAL)
3347 		return;
3348 
3349 	switch (me_id) {
3350 	case 0:
3351 	case 1:
3352 	case 2:
3353 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3354 			ring = &adev->gfx.compute_ring
3355 					[i +
3356 					 xcc_id * adev->gfx.num_compute_rings];
3357 			if (ring->me == me_id && ring->pipe == pipe_id &&
3358 			    ring->queue == queue_id)
3359 				drm_sched_fault(&ring->sched);
3360 		}
3361 		break;
3362 	}
3363 }
3364 
3365 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3366 				 struct amdgpu_irq_src *source,
3367 				 struct amdgpu_iv_entry *entry)
3368 {
3369 	DRM_ERROR("Illegal register access in command stream\n");
3370 	gfx_v9_4_3_fault(adev, entry);
3371 	return 0;
3372 }
3373 
3374 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
3375 				 struct amdgpu_irq_src *source,
3376 				 struct amdgpu_iv_entry *entry)
3377 {
3378 	DRM_ERROR("Illegal opcode in command stream\n");
3379 	gfx_v9_4_3_fault(adev, entry);
3380 	return 0;
3381 }
3382 
3383 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3384 				  struct amdgpu_irq_src *source,
3385 				  struct amdgpu_iv_entry *entry)
3386 {
3387 	DRM_ERROR("Illegal instruction in command stream\n");
3388 	gfx_v9_4_3_fault(adev, entry);
3389 	return 0;
3390 }
3391 
3392 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3393 {
3394 	const unsigned int cp_coher_cntl =
3395 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3396 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3397 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3398 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3399 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3400 
3401 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3402 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3403 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3404 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3405 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3406 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3407 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3408 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3409 }
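
/*
 * Illustrative sketch, not part of the driver: CP_COHER_BASE/SIZE are
 * commonly described as 256-byte granular on gfx9 (an assumption here),
 * so the all-ones size and zero base written above cover the entire
 * address range:
 */
static inline u64 __maybe_unused gfx_v9_4_3_coher_range_bytes(u32 size_lo,
							      u32 size_hi)
{
	/* the range length is split across CP_COHER_SIZE and _SIZE_HI */
	return (((u64)size_hi << 32) | size_lo) << 8;
}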
3410 
3411 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3412 					uint32_t pipe, bool enable)
3413 {
3414 	struct amdgpu_device *adev = ring->adev;
3415 	uint32_t val;
3416 	uint32_t wcl_cs_reg;
3417 
3418 	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
3419 	val = enable ? 0x1 : 0x7f;
3420 
3421 	switch (pipe) {
3422 	case 0:
3423 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3424 		break;
3425 	case 1:
3426 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3427 		break;
3428 	case 2:
3429 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3430 		break;
3431 	case 3:
3432 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3433 		break;
3434 	default:
3435 		DRM_DEBUG("invalid pipe %d\n", pipe);
3436 		return;
3437 	}
3438 
3439 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3440 }
3441 
3442 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3443 {
3444 	struct amdgpu_device *adev = ring->adev;
3445 	uint32_t val;
3446 	int i;
3447 
3448 	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
3449 	 * limit the number of gfx waves. Setting it to 5 bits (0x1f) makes
3450 	 * sure gfx only gets around 25% of GPU resources.
3451 	 */
3452 	val = enable ? 0x1f : 0x07ffffff;
3453 	amdgpu_ring_emit_wreg(ring,
3454 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3455 			      val);
3456 
3457 	/* Restrict waves for normal/low priority compute queues as well
3458 	 * to get the best QoS for high priority compute jobs.
3459 	 *
3460 	 * amdgpu controls only the 1st ME (CS pipes 0-3).
3461 	 */
3462 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3463 		if (i != ring->pipe)
3464 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3466 	}
3467 }
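
/*
 * Illustrative sketch, not part of the driver: with the 7-bit limiter
 * above, the enabled value 0x1f out of a full scale of 0x7f is roughly
 * 24%, which is where the "around 25%" figure in the comment comes from:
 */
static inline u32 __maybe_unused gfx_v9_4_3_wcl_percent(u32 limit)
{
	return (limit * 100) / 0x7f;	/* 0x1f -> 24 */
}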
3468 
3469 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3470 				unsigned int vmid)
3471 {
3472 	struct amdgpu_device *adev = ring->adev;
3473 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3474 	struct amdgpu_ring *kiq_ring = &kiq->ring;
3475 	unsigned long flags;
3476 	int r, i;
3477 
3478 	if (amdgpu_sriov_vf(adev))
3479 		return -EINVAL;
3480 
3481 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3482 		return -EINVAL;
3483 
3484 	spin_lock_irqsave(&kiq->ring_lock, flags);
3485 
3486 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3487 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3488 		return -ENOMEM;
3489 	}
3490 
3491 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3492 				   0, 0);
3493 	amdgpu_ring_commit(kiq_ring);
3494 
3495 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3496 
3497 	r = amdgpu_ring_test_ring(kiq_ring);
3498 	if (r)
3499 		return r;
3500 
3501 	/* make sure dequeue is complete */
3502 	amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3503 	mutex_lock(&adev->srbm_mutex);
3504 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, ring->xcc_id));
3505 	for (i = 0; i < adev->usec_timeout; i++) {
3506 		if (!(RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_HQD_ACTIVE) & 1))
3507 			break;
3508 		udelay(1);
3509 	}
3510 	if (i >= adev->usec_timeout)
3511 		r = -ETIMEDOUT;
3512 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, ring->xcc_id));
3513 	mutex_unlock(&adev->srbm_mutex);
3514 	amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3515 	if (r) {
3516 		dev_err(adev->dev, "failed to wait on hqd deactivate\n");
3517 		return r;
3518 	}
3519 
3520 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
3521 	if (unlikely(r != 0)) {
3522 		dev_err(adev->dev, "failed to reserve mqd_obj\n");
3523 		return r;
3524 	}
3525 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3526 	if (!r) {
3527 		r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
3528 		amdgpu_bo_kunmap(ring->mqd_obj);
3529 		ring->mqd_ptr = NULL;
3530 	}
3531 	amdgpu_bo_unreserve(ring->mqd_obj);
3532 	if (r) {
3533 		dev_err(adev->dev, "failed to map or reinit the kcq mqd\n");
3534 		return r;
3535 	}
3536 	spin_lock_irqsave(&kiq->ring_lock, flags);
3537 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
3538 	if (r) {
3539 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3540 		return -ENOMEM;
3541 	}
3542 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
3543 	amdgpu_ring_commit(kiq_ring);
3544 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3545 
3546 	r = amdgpu_ring_test_ring(kiq_ring);
3547 	if (r) {
3548 		dev_err(adev->dev, "failed to remap queue\n");
3549 		return r;
3550 	}
3551 	return amdgpu_ring_test_ring(ring);
3552 }
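
/*
 * Illustrative sketch, not part of the driver: the HQD deactivation
 * wait in gfx_v9_4_3_reset_kcq() follows the usual poll-with-timeout
 * idiom.  Stripped of the GRBM selection it reduces to the hypothetical
 * helper below, where cond() stands in for the register test:
 */
static inline int __maybe_unused gfx_v9_4_3_poll_usec(bool (*cond)(void *),
						      void *arg, u32 timeout_us)
{
	u32 i;

	for (i = 0; i < timeout_us; i++) {
		if (cond(arg))
			return 0;
		udelay(1);	/* one microsecond per iteration */
	}
	return -ETIMEDOUT;
}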
3553 
3554 enum amdgpu_gfx_cp_ras_mem_id {
3555 	AMDGPU_GFX_CP_MEM1 = 1,
3556 	AMDGPU_GFX_CP_MEM2,
3557 	AMDGPU_GFX_CP_MEM3,
3558 	AMDGPU_GFX_CP_MEM4,
3559 	AMDGPU_GFX_CP_MEM5,
3560 };
3561 
3562 enum amdgpu_gfx_gcea_ras_mem_id {
3563 	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3564 	AMDGPU_GFX_GCEA_IORD_CMDMEM,
3565 	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3566 	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3567 	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3568 	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3569 	AMDGPU_GFX_GCEA_MAM_DMEM0,
3570 	AMDGPU_GFX_GCEA_MAM_DMEM1,
3571 	AMDGPU_GFX_GCEA_MAM_DMEM2,
3572 	AMDGPU_GFX_GCEA_MAM_DMEM3,
3573 	AMDGPU_GFX_GCEA_MAM_AMEM0,
3574 	AMDGPU_GFX_GCEA_MAM_AMEM1,
3575 	AMDGPU_GFX_GCEA_MAM_AMEM2,
3576 	AMDGPU_GFX_GCEA_MAM_AMEM3,
3577 	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3578 	AMDGPU_GFX_GCEA_WRET_TAGMEM,
3579 	AMDGPU_GFX_GCEA_RRET_TAGMEM,
3580 	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3581 	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3582 	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3583 };
3584 
3585 enum amdgpu_gfx_gc_cane_ras_mem_id {
3586 	AMDGPU_GFX_GC_CANE_MEM0 = 0,
3587 };
3588 
3589 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3590 	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3591 };
3592 
3593 enum amdgpu_gfx_gds_ras_mem_id {
3594 	AMDGPU_GFX_GDS_MEM0 = 0,
3595 };
3596 
3597 enum amdgpu_gfx_lds_ras_mem_id {
3598 	AMDGPU_GFX_LDS_BANK0 = 0,
3599 	AMDGPU_GFX_LDS_BANK1,
3600 	AMDGPU_GFX_LDS_BANK2,
3601 	AMDGPU_GFX_LDS_BANK3,
3602 	AMDGPU_GFX_LDS_BANK4,
3603 	AMDGPU_GFX_LDS_BANK5,
3604 	AMDGPU_GFX_LDS_BANK6,
3605 	AMDGPU_GFX_LDS_BANK7,
3606 	AMDGPU_GFX_LDS_BANK8,
3607 	AMDGPU_GFX_LDS_BANK9,
3608 	AMDGPU_GFX_LDS_BANK10,
3609 	AMDGPU_GFX_LDS_BANK11,
3610 	AMDGPU_GFX_LDS_BANK12,
3611 	AMDGPU_GFX_LDS_BANK13,
3612 	AMDGPU_GFX_LDS_BANK14,
3613 	AMDGPU_GFX_LDS_BANK15,
3614 	AMDGPU_GFX_LDS_BANK16,
3615 	AMDGPU_GFX_LDS_BANK17,
3616 	AMDGPU_GFX_LDS_BANK18,
3617 	AMDGPU_GFX_LDS_BANK19,
3618 	AMDGPU_GFX_LDS_BANK20,
3619 	AMDGPU_GFX_LDS_BANK21,
3620 	AMDGPU_GFX_LDS_BANK22,
3621 	AMDGPU_GFX_LDS_BANK23,
3622 	AMDGPU_GFX_LDS_BANK24,
3623 	AMDGPU_GFX_LDS_BANK25,
3624 	AMDGPU_GFX_LDS_BANK26,
3625 	AMDGPU_GFX_LDS_BANK27,
3626 	AMDGPU_GFX_LDS_BANK28,
3627 	AMDGPU_GFX_LDS_BANK29,
3628 	AMDGPU_GFX_LDS_BANK30,
3629 	AMDGPU_GFX_LDS_BANK31,
3630 	AMDGPU_GFX_LDS_SP_BUFFER_A,
3631 	AMDGPU_GFX_LDS_SP_BUFFER_B,
3632 };
3633 
3634 enum amdgpu_gfx_rlc_ras_mem_id {
3635 	AMDGPU_GFX_RLC_GPMF32 = 1,
3636 	AMDGPU_GFX_RLC_RLCVF32,
3637 	AMDGPU_GFX_RLC_SCRATCH,
3638 	AMDGPU_GFX_RLC_SRM_ARAM,
3639 	AMDGPU_GFX_RLC_SRM_DRAM,
3640 	AMDGPU_GFX_RLC_TCTAG,
3641 	AMDGPU_GFX_RLC_SPM_SE,
3642 	AMDGPU_GFX_RLC_SPM_GRBMT,
3643 };
3644 
3645 enum amdgpu_gfx_sp_ras_mem_id {
3646 	AMDGPU_GFX_SP_SIMDID0 = 0,
3647 };
3648 
3649 enum amdgpu_gfx_spi_ras_mem_id {
3650 	AMDGPU_GFX_SPI_MEM0 = 0,
3651 	AMDGPU_GFX_SPI_MEM1,
3652 	AMDGPU_GFX_SPI_MEM2,
3653 	AMDGPU_GFX_SPI_MEM3,
3654 };
3655 
3656 enum amdgpu_gfx_sqc_ras_mem_id {
3657 	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3658 	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3659 	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3660 	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3661 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3662 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3663 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3664 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3665 	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3666 	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3667 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3668 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3669 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3670 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3671 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3672 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3673 	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3674 	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3675 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3676 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3677 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3678 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3679 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3680 };
3681 
3682 enum amdgpu_gfx_sq_ras_mem_id {
3683 	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3684 	AMDGPU_GFX_SQ_SGPR_MEM1,
3685 	AMDGPU_GFX_SQ_SGPR_MEM2,
3686 	AMDGPU_GFX_SQ_SGPR_MEM3,
3687 };
3688 
3689 enum amdgpu_gfx_ta_ras_mem_id {
3690 	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3691 	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3692 	AMDGPU_GFX_TA_FS_CFIFO_RAM,
3693 	AMDGPU_GFX_TA_FSX_LFIFO,
3694 	AMDGPU_GFX_TA_FS_DFIFO_RAM,
3695 };
3696 
3697 enum amdgpu_gfx_tcc_ras_mem_id {
3698 	AMDGPU_GFX_TCC_MEM1 = 1,
3699 };
3700 
3701 enum amdgpu_gfx_tca_ras_mem_id {
3702 	AMDGPU_GFX_TCA_MEM1 = 1,
3703 };
3704 
3705 enum amdgpu_gfx_tci_ras_mem_id {
3706 	AMDGPU_GFX_TCIW_MEM = 1,
3707 };
3708 
3709 enum amdgpu_gfx_tcp_ras_mem_id {
3710 	AMDGPU_GFX_TCP_LFIFO0 = 1,
3711 	AMDGPU_GFX_TCP_SET0BANK0_RAM,
3712 	AMDGPU_GFX_TCP_SET0BANK1_RAM,
3713 	AMDGPU_GFX_TCP_SET0BANK2_RAM,
3714 	AMDGPU_GFX_TCP_SET0BANK3_RAM,
3715 	AMDGPU_GFX_TCP_SET1BANK0_RAM,
3716 	AMDGPU_GFX_TCP_SET1BANK1_RAM,
3717 	AMDGPU_GFX_TCP_SET1BANK2_RAM,
3718 	AMDGPU_GFX_TCP_SET1BANK3_RAM,
3719 	AMDGPU_GFX_TCP_SET2BANK0_RAM,
3720 	AMDGPU_GFX_TCP_SET2BANK1_RAM,
3721 	AMDGPU_GFX_TCP_SET2BANK2_RAM,
3722 	AMDGPU_GFX_TCP_SET2BANK3_RAM,
3723 	AMDGPU_GFX_TCP_SET3BANK0_RAM,
3724 	AMDGPU_GFX_TCP_SET3BANK1_RAM,
3725 	AMDGPU_GFX_TCP_SET3BANK2_RAM,
3726 	AMDGPU_GFX_TCP_SET3BANK3_RAM,
3727 	AMDGPU_GFX_TCP_VM_FIFO,
3728 	AMDGPU_GFX_TCP_DB_TAGRAM0,
3729 	AMDGPU_GFX_TCP_DB_TAGRAM1,
3730 	AMDGPU_GFX_TCP_DB_TAGRAM2,
3731 	AMDGPU_GFX_TCP_DB_TAGRAM3,
3732 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3733 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3734 	AMDGPU_GFX_TCP_CMD_FIFO,
3735 };
3736 
3737 enum amdgpu_gfx_td_ras_mem_id {
3738 	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3739 	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3740 	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3741 };
3742 
3743 enum amdgpu_gfx_tcx_ras_mem_id {
3744 	AMDGPU_GFX_TCX_FIFOD0 = 0,
3745 	AMDGPU_GFX_TCX_FIFOD1,
3746 	AMDGPU_GFX_TCX_FIFOD2,
3747 	AMDGPU_GFX_TCX_FIFOD3,
3748 	AMDGPU_GFX_TCX_FIFOD4,
3749 	AMDGPU_GFX_TCX_FIFOD5,
3750 	AMDGPU_GFX_TCX_FIFOD6,
3751 	AMDGPU_GFX_TCX_FIFOD7,
3752 	AMDGPU_GFX_TCX_FIFOB0,
3753 	AMDGPU_GFX_TCX_FIFOB1,
3754 	AMDGPU_GFX_TCX_FIFOB2,
3755 	AMDGPU_GFX_TCX_FIFOB3,
3756 	AMDGPU_GFX_TCX_FIFOB4,
3757 	AMDGPU_GFX_TCX_FIFOB5,
3758 	AMDGPU_GFX_TCX_FIFOB6,
3759 	AMDGPU_GFX_TCX_FIFOB7,
3760 	AMDGPU_GFX_TCX_FIFOA0,
3761 	AMDGPU_GFX_TCX_FIFOA1,
3762 	AMDGPU_GFX_TCX_FIFOA2,
3763 	AMDGPU_GFX_TCX_FIFOA3,
3764 	AMDGPU_GFX_TCX_FIFOA4,
3765 	AMDGPU_GFX_TCX_FIFOA5,
3766 	AMDGPU_GFX_TCX_FIFOA6,
3767 	AMDGPU_GFX_TCX_FIFOA7,
3768 	AMDGPU_GFX_TCX_CFIFO0,
3769 	AMDGPU_GFX_TCX_CFIFO1,
3770 	AMDGPU_GFX_TCX_CFIFO2,
3771 	AMDGPU_GFX_TCX_CFIFO3,
3772 	AMDGPU_GFX_TCX_CFIFO4,
3773 	AMDGPU_GFX_TCX_CFIFO5,
3774 	AMDGPU_GFX_TCX_CFIFO6,
3775 	AMDGPU_GFX_TCX_CFIFO7,
3776 	AMDGPU_GFX_TCX_FIFO_ACKB0,
3777 	AMDGPU_GFX_TCX_FIFO_ACKB1,
3778 	AMDGPU_GFX_TCX_FIFO_ACKB2,
3779 	AMDGPU_GFX_TCX_FIFO_ACKB3,
3780 	AMDGPU_GFX_TCX_FIFO_ACKB4,
3781 	AMDGPU_GFX_TCX_FIFO_ACKB5,
3782 	AMDGPU_GFX_TCX_FIFO_ACKB6,
3783 	AMDGPU_GFX_TCX_FIFO_ACKB7,
3784 	AMDGPU_GFX_TCX_FIFO_ACKD0,
3785 	AMDGPU_GFX_TCX_FIFO_ACKD1,
3786 	AMDGPU_GFX_TCX_FIFO_ACKD2,
3787 	AMDGPU_GFX_TCX_FIFO_ACKD3,
3788 	AMDGPU_GFX_TCX_FIFO_ACKD4,
3789 	AMDGPU_GFX_TCX_FIFO_ACKD5,
3790 	AMDGPU_GFX_TCX_FIFO_ACKD6,
3791 	AMDGPU_GFX_TCX_FIFO_ACKD7,
3792 	AMDGPU_GFX_TCX_DST_FIFOA0,
3793 	AMDGPU_GFX_TCX_DST_FIFOA1,
3794 	AMDGPU_GFX_TCX_DST_FIFOA2,
3795 	AMDGPU_GFX_TCX_DST_FIFOA3,
3796 	AMDGPU_GFX_TCX_DST_FIFOA4,
3797 	AMDGPU_GFX_TCX_DST_FIFOA5,
3798 	AMDGPU_GFX_TCX_DST_FIFOA6,
3799 	AMDGPU_GFX_TCX_DST_FIFOA7,
3800 	AMDGPU_GFX_TCX_DST_FIFOB0,
3801 	AMDGPU_GFX_TCX_DST_FIFOB1,
3802 	AMDGPU_GFX_TCX_DST_FIFOB2,
3803 	AMDGPU_GFX_TCX_DST_FIFOB3,
3804 	AMDGPU_GFX_TCX_DST_FIFOB4,
3805 	AMDGPU_GFX_TCX_DST_FIFOB5,
3806 	AMDGPU_GFX_TCX_DST_FIFOB6,
3807 	AMDGPU_GFX_TCX_DST_FIFOB7,
3808 	AMDGPU_GFX_TCX_DST_FIFOD0,
3809 	AMDGPU_GFX_TCX_DST_FIFOD1,
3810 	AMDGPU_GFX_TCX_DST_FIFOD2,
3811 	AMDGPU_GFX_TCX_DST_FIFOD3,
3812 	AMDGPU_GFX_TCX_DST_FIFOD4,
3813 	AMDGPU_GFX_TCX_DST_FIFOD5,
3814 	AMDGPU_GFX_TCX_DST_FIFOD6,
3815 	AMDGPU_GFX_TCX_DST_FIFOD7,
3816 	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3817 	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3818 	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3819 	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3820 	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3821 	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3822 	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3823 	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3824 	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3825 	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3826 	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3827 	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3828 	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3829 	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3830 	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3831 	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3832 };
3833 
3834 enum amdgpu_gfx_atc_l2_ras_mem_id {
3835 	AMDGPU_GFX_ATC_L2_MEM0 = 0,
3836 };
3837 
3838 enum amdgpu_gfx_utcl2_ras_mem_id {
3839 	AMDGPU_GFX_UTCL2_MEM0 = 0,
3840 };
3841 
3842 enum amdgpu_gfx_vml2_ras_mem_id {
3843 	AMDGPU_GFX_VML2_MEM0 = 0,
3844 };
3845 
3846 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3847 	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3848 };
3849 
3850 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3851 	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3852 	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3853 	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3854 	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3855 	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3856 };
3857 
3858 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3859 	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3860 	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3861 	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3862 	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3863 	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3864 	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3865 	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3866 	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3867 	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3868 	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3869 	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3870 	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3871 	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3872 	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3873 	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3874 	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3875 	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3876 	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3877 	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3878 	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3879 };
3880 
3881 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3882 	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3883 };
3884 
3885 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3886 	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3887 };
3888 
3889 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3890 	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3891 };
3892 
3893 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3894 	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3895 	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3896 	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3897 	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3898 	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3899 	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3900 	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
3901 	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
3902 	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
3903 	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
3904 	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
3905 	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
3906 	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
3907 	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
3908 	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
3909 	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
3910 	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
3911 	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
3912 	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
3913 	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
3914 	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
3915 	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
3916 	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
3917 	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
3918 	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
3919 	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
3920 	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
3921 	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
3922 	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
3923 	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
3924 	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
3925 	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
3926 	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
3927 	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
3928 };
3929 
3930 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
3931 	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
3932 	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
3933 	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
3934 	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
3935 	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
3936 	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
3937 	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
3938 	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
3939 };
3940 
3941 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
3942 	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
3943 };
3944 
3945 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
3946 	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
3947 	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
3948 	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
3949 	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
3950 };
3951 
3952 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
3953 	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
3954 	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
3955 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
3956 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
3957 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
3958 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
3959 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
3960 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
3961 	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
3962 	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
3963 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
3964 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
3965 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
3966 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
3967 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
3968 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
3969 	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
3970 	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
3971 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
3972 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
3973 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
3974 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
3975 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
3976 };
3977 
3978 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
3979 	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
3980 	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
3981 	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
3982 	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
3983 };
3984 
3985 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
3986 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
3987 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
3988 	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
3989 	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
3990 	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
3991 };
3992 
3993 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
3994 	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
3995 };
3996 
3997 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
3998 	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
3999 };
4000 
4001 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
4002 	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
4003 };
4004 
4005 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
4006 	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
4007 	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
4008 	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
4009 	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
4010 	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
4011 	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
4012 	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
4013 	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
4014 	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
4015 	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
4016 	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
4017 	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
4018 	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
4019 	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
4020 	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
4021 	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
4022 	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
4023 	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
4024 	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
4025 	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
4026 	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
4027 	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
4028 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
4029 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
4030 	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
4031 };
4032 
4033 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
4034 	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
4035 	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
4036 	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
4037 };
4038 
4039 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
4040 	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
4041 	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
4042 	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
4043 	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
4044 	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
4045 	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
4046 	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
4047 	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
4048 	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
4049 	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
4050 	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
4051 	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
4052 	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
4053 	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
4054 	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
4055 	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
4056 	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
4057 	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
4058 	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
4059 	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
4060 	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
4061 	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
4062 	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
4063 	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
4064 	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
4065 	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
4066 	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
4067 	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
4068 	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
4069 	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
4070 	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
4071 	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
4072 	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
4073 	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
4074 	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
4075 	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
4076 	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
4077 	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
4078 	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
4079 	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
4080 	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
4081 	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
4082 	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
4083 	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
4084 	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
4085 	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
4086 	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
4087 	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
4088 	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
4089 	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4090 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4091 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4092 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4093 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4094 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4095 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4096 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4097 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4098 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4099 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4100 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4101 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4102 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4103 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4104 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4105 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4106 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4107 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4108 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4109 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4110 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4111 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4112 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4113 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4114 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4115 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4116 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4117 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4118 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4119 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4120 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4121 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4122 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4123 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4124 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4125 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4126 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4127 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4128 };
4129 
4130 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4131 	{AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
4132 };
4133 
4134 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4135 	{AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
4136 };
4137 
4138 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4139 	{AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
4140 };
4141 
4142 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4143 	{AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
4144 };
4145 
4146 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4147 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4148 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4149 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4150 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4151 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4152 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4153 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4154 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4155 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4156 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4157 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4158 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4159 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
4160 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
4161 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
4162 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
4163 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
4164 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
4165 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
4166 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
4167 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
4168 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
4169 };
4170 
4171 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
4172 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
4173 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4174 	    AMDGPU_GFX_RLC_MEM, 1},
4175 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
4176 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4177 	    AMDGPU_GFX_CP_MEM, 1},
4178 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
4179 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4180 	    AMDGPU_GFX_CP_MEM, 1},
4181 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
4182 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4183 	    AMDGPU_GFX_CP_MEM, 1},
4184 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
4185 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4186 	    AMDGPU_GFX_GDS_MEM, 1},
4187 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
4188 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4189 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4190 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
4191 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4192 	    AMDGPU_GFX_SPI_MEM, 1},
4193 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
4194 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4195 	    AMDGPU_GFX_SP_MEM, 4},
4196 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
4197 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4198 	    AMDGPU_GFX_SP_MEM, 4},
4199 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
4200 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4201 	    AMDGPU_GFX_SQ_MEM, 4},
4202 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
4203 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4204 	    AMDGPU_GFX_SQC_MEM, 4},
4205 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
4206 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4207 	    AMDGPU_GFX_TCX_MEM, 1},
4208 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
4209 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4210 	    AMDGPU_GFX_TCC_MEM, 1},
4211 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
4212 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4213 	    AMDGPU_GFX_TA_MEM, 4},
4214 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
4215 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4216 	    AMDGPU_GFX_TCI_MEM, 1},
4217 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
4218 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4219 	    AMDGPU_GFX_TCP_MEM, 4},
4220 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
4221 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4222 	    AMDGPU_GFX_TD_MEM, 4},
4223 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
4224 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4225 	    AMDGPU_GFX_GCEA_MEM, 1},
4226 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
4227 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4228 	    AMDGPU_GFX_LDS_MEM, 4},
4229 };
4230 
4231 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
4232 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
4233 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4234 	    AMDGPU_GFX_RLC_MEM, 1},
4235 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
4236 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4237 	    AMDGPU_GFX_CP_MEM, 1},
4238 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
4239 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4240 	    AMDGPU_GFX_CP_MEM, 1},
4241 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
4242 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4243 	    AMDGPU_GFX_CP_MEM, 1},
4244 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
4245 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4246 	    AMDGPU_GFX_GDS_MEM, 1},
4247 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
4248 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4249 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4250 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
4251 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4252 	    AMDGPU_GFX_SPI_MEM, 1},
4253 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
4254 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4255 	    AMDGPU_GFX_SP_MEM, 4},
4256 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
4257 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4258 	    AMDGPU_GFX_SP_MEM, 4},
4259 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
4260 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4261 	    AMDGPU_GFX_SQ_MEM, 4},
4262 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
4263 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4264 	    AMDGPU_GFX_SQC_MEM, 4},
4265 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
4266 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4267 	    AMDGPU_GFX_TCX_MEM, 1},
4268 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
4269 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4270 	    AMDGPU_GFX_TCC_MEM, 1},
4271 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
4272 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4273 	    AMDGPU_GFX_TA_MEM, 4},
4274 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
4275 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4276 	    AMDGPU_GFX_TCI_MEM, 1},
4277 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
4278 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4279 	    AMDGPU_GFX_TCP_MEM, 4},
4280 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
4281 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4282 	    AMDGPU_GFX_TD_MEM, 4},
4283 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
4284 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
4285 	    AMDGPU_GFX_TCA_MEM, 1},
4286 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
4287 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4288 	    AMDGPU_GFX_GCEA_MEM, 1},
4289 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
4290 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4291 	    AMDGPU_GFX_LDS_MEM, 4},
4292 };
4293 
4294 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
4295 					void *ras_error_status, int xcc_id)
4296 {
4297 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
4298 	unsigned long ce_count = 0, ue_count = 0;
4299 	uint32_t i, j, k;
4300 
4301 	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
4302 	struct amdgpu_smuio_mcm_config_info mcm_info = {
4303 		.socket_id = adev->smuio.funcs->get_socket_id(adev),
4304 		.die_id = xcc_id & 0x01 ? 1 : 0,
4305 	};
4306 
4307 	mutex_lock(&adev->grbm_idx_mutex);
4308 
4309 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4310 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4311 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4312 				/* no need to select if instance number is 1 */
4313 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4314 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4315 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4316 
4317 				amdgpu_ras_inst_query_ras_error_count(adev,
4318 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4319 					1,
4320 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
4321 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
4322 					GET_INST(GC, xcc_id),
4323 					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
4324 					&ce_count);
4325 
4326 				amdgpu_ras_inst_query_ras_error_count(adev,
4327 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4328 					1,
4329 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4330 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4331 					GET_INST(GC, xcc_id),
4332 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4333 					&ue_count);
4334 			}
4335 		}
4336 	}
4337 
4338 	/* handle extra register entries of UE */
4339 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4340 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4341 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4342 				/* no need to select if instance number is 1 */
4343 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4344 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4345 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4346 
4347 				amdgpu_ras_inst_query_ras_error_count(adev,
4348 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4349 					1,
4350 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4351 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4352 					GET_INST(GC, xcc_id),
4353 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4354 					&ue_count);
4355 			}
4356 		}
4357 	}
4358 
4359 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4360 			xcc_id);
4361 	mutex_unlock(&adev->grbm_idx_mutex);
4362 
4363 	/* the caller should make sure the initial values of
4364 	 * err_data->ue_count and err_data->ce_count are set
4365 	 */
4366 	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4367 	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4368 }
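
/*
 * Illustrative sketch, not part of the driver: the mcm_info.die_id
 * computed above maps an XCC to its AID die purely by parity:
 */
static inline int __maybe_unused gfx_v9_4_3_xcc_to_die_id(int xcc_id)
{
	return xcc_id & 0x01;	/* even XCCs -> die 0, odd XCCs -> die 1 */
}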
4369 
4370 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4371 					void *ras_error_status, int xcc_id)
4372 {
4373 	uint32_t i, j, k;
4374 
4375 	mutex_lock(&adev->grbm_idx_mutex);
4376 
4377 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4378 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4379 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4380 				/* no need to select if instance number is 1 */
4381 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4382 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4383 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4384 
4385 				amdgpu_ras_inst_reset_ras_error_count(adev,
4386 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4387 					1,
4388 					GET_INST(GC, xcc_id));
4389 
4390 				amdgpu_ras_inst_reset_ras_error_count(adev,
4391 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4392 					1,
4393 					GET_INST(GC, xcc_id));
4394 			}
4395 		}
4396 	}
4397 
4398 	/* handle extra register entries of UE */
4399 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4400 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4401 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4402 				/* no need to select if instance number is 1 */
4403 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4404 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4405 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4406 
4407 				amdgpu_ras_inst_reset_ras_error_count(adev,
4408 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4409 					1,
4410 					GET_INST(GC, xcc_id));
4411 			}
4412 		}
4413 	}
4414 
4415 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4416 			xcc_id);
4417 	mutex_unlock(&adev->grbm_idx_mutex);
4418 }
4419 
4420 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
4421 					void *ras_error_status, int xcc_id)
4422 {
4423 	uint32_t i;
4424 	uint32_t data;
4425 
4426 	if (amdgpu_sriov_vf(adev))
4427 		return;
4428 
4429 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
4430 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
4431 			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
4432 
4433 	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
4434 	    (amdgpu_watchdog_timer.period < 1 ||
4435 	     amdgpu_watchdog_timer.period > 0x23)) {
4436 		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
4437 		amdgpu_watchdog_timer.period = 0x23;
4438 	}
4439 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
4440 			     amdgpu_watchdog_timer.period);
4441 
4442 	mutex_lock(&adev->grbm_idx_mutex);
4443 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4444 		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
4445 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
4446 	}
4447 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4448 			xcc_id);
4449 	mutex_unlock(&adev->grbm_idx_mutex);
4450 }
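
/*
 * Illustrative sketch, not part of the driver: the range check above
 * amounts to clamping an out-of-range fatal-timeout period to the
 * maximum PERIOD_SEL encoding:
 */
static inline u32 __maybe_unused gfx_v9_4_3_clamp_wdt_period(u32 period)
{
	return (period < 1 || period > 0x23) ? 0x23 : period;
}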
4451 
4452 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4453 					void *ras_error_status)
4454 {
4455 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4456 			gfx_v9_4_3_inst_query_ras_err_count);
4457 }
4458 
4459 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4460 {
4461 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4462 }
4463 
4464 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
4465 {
4466 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
4467 }
4468 
4469 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
4470 {
4471 	int i;
4472 
4473 	/* Header itself is a NOP packet */
4474 	if (num_nop == 1) {
4475 		amdgpu_ring_write(ring, ring->funcs->nop);
4476 		return;
4477 	}
4478 
4479 	/* Max HW optimization up to 0x3ffe, then the remaining NOPs one at a time */
4480 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
4481 
4482 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
4483 	for (i = 1; i < num_nop; i++)
4484 		amdgpu_ring_write(ring, ring->funcs->nop);
4485 }
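
/*
 * Illustrative sketch, not part of the driver: a PKT3 count field of N
 * encodes N + 1 payload DWs, so the header above absorbs up to 0x3fff
 * padding DWs in one packet; any excess trailing DWs are themselves
 * one-DW NOP packets (the special 0x3fff count encoding).  The DWs
 * consumed by the first packet are therefore:
 */
static inline u32 __maybe_unused gfx_v9_4_3_first_nop_pkt_dws(u32 num_nop)
{
	/* one header DW plus (count + 1) payload DWs */
	return 1 + min_t(u32, num_nop - 2, 0x3ffe) + 1;
}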
4486 
4487 static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
4488 {
4489 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4490 	uint32_t i, j, k;
4491 	uint32_t xcc_id, xcc_offset, inst_offset;
4492 	uint32_t num_xcc, reg, num_inst;
4493 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4494 
4495 	if (!adev->gfx.ip_dump_core)
4496 		return;
4497 
4498 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4499 	drm_printf(p, "Number of Instances:%d\n", num_xcc);
4500 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4501 		xcc_offset = xcc_id * reg_count;
4502 		drm_printf(p, "\nInstance id:%d\n", xcc_id);
4503 		for (i = 0; i < reg_count; i++)
4504 			drm_printf(p, "%-50s \t 0x%08x\n",
4505 				   gc_reg_list_9_4_3[i].reg_name,
4506 				   adev->gfx.ip_dump_core[xcc_offset + i]);
4507 	}
4508 
4509 	/* print compute queue registers for all instances */
4510 	if (!adev->gfx.ip_dump_compute_queues)
4511 		return;
4512 
4513 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4514 		adev->gfx.mec.num_queue_per_pipe;
4515 
4516 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4517 	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
4518 		   num_xcc,
4519 		   adev->gfx.mec.num_mec,
4520 		   adev->gfx.mec.num_pipe_per_mec,
4521 		   adev->gfx.mec.num_queue_per_pipe);
4522 
4523 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4524 		xcc_offset = xcc_id * reg_count * num_inst;
4525 		inst_offset = 0;
4526 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4527 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4528 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4529 					drm_printf(p,
4530 						   "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
4531 						    xcc_id, i, j, k);
4532 					for (reg = 0; reg < reg_count; reg++) {
4533 						drm_printf(p,
4534 							   "%-50s \t 0x%08x\n",
4535 							   gc_cp_reg_list_9_4_3[reg].reg_name,
4536 							   adev->gfx.ip_dump_compute_queues
4537 								[xcc_offset + inst_offset +
4538 								reg]);
4539 					}
4540 					inst_offset += reg_count;
4541 				}
4542 			}
4543 		}
4544 	}
4545 }
4546 
4547 static void gfx_v9_4_3_ip_dump(void *handle)
4548 {
4549 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4550 	uint32_t i, j, k;
4551 	uint32_t num_xcc, reg, num_inst;
4552 	uint32_t xcc_id, xcc_offset, inst_offset;
4553 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4554 
4555 	if (!adev->gfx.ip_dump_core)
4556 		return;
4557 
4558 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4559 
4560 	amdgpu_gfx_off_ctrl(adev, false);
4561 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4562 		xcc_offset = xcc_id * reg_count;
4563 		for (i = 0; i < reg_count; i++)
4564 			adev->gfx.ip_dump_core[xcc_offset + i] =
4565 				RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
4566 								   GET_INST(GC, xcc_id)));
4567 	}
4568 	amdgpu_gfx_off_ctrl(adev, true);
4569 
4570 	/* dump compute queue registers for all instances */
4571 	if (!adev->gfx.ip_dump_compute_queues)
4572 		return;
4573 
4574 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4575 		adev->gfx.mec.num_queue_per_pipe;
4576 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4577 	amdgpu_gfx_off_ctrl(adev, false);
4578 	mutex_lock(&adev->srbm_mutex);
4579 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4580 		xcc_offset = xcc_id * reg_count * num_inst;
4581 		inst_offset = 0;
4582 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4583 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4584 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4585 					/* ME0 is for GFX, so MEC selection starts at ME1 */
4586 					soc15_grbm_select(adev, 1 + i, j, k, 0,
4587 							  GET_INST(GC, xcc_id));
4588 
4589 					for (reg = 0; reg < reg_count; reg++) {
4590 						adev->gfx.ip_dump_compute_queues
4591 							[xcc_offset +
4592 							 inst_offset + reg] =
4593 							RREG32(SOC15_REG_ENTRY_OFFSET_INST(
4594 								gc_cp_reg_list_9_4_3[reg],
4595 								GET_INST(GC, xcc_id)));
4596 					}
4597 					inst_offset += reg_count;
4598 				}
4599 			}
4600 		}
4601 	}
4602 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4603 	mutex_unlock(&adev->srbm_mutex);
4604 	amdgpu_gfx_off_ctrl(adev, true);
4605 }
4606 
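/*
 * The cleaner shader scrubs shader core state between jobs; it pairs with
 * the enforce-isolation begin_use/end_use hooks in the ring funcs below.
 */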
4607 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
4608 {
4609 	/* Emit the cleaner shader */
4610 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
4611 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
4612 }
4613 
4614 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4615 	.name = "gfx_v9_4_3",
4616 	.early_init = gfx_v9_4_3_early_init,
4617 	.late_init = gfx_v9_4_3_late_init,
4618 	.sw_init = gfx_v9_4_3_sw_init,
4619 	.sw_fini = gfx_v9_4_3_sw_fini,
4620 	.hw_init = gfx_v9_4_3_hw_init,
4621 	.hw_fini = gfx_v9_4_3_hw_fini,
4622 	.suspend = gfx_v9_4_3_suspend,
4623 	.resume = gfx_v9_4_3_resume,
4624 	.is_idle = gfx_v9_4_3_is_idle,
4625 	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
4626 	.soft_reset = gfx_v9_4_3_soft_reset,
4627 	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4628 	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
4629 	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4630 	.dump_ip_state = gfx_v9_4_3_ip_dump,
4631 	.print_ip_state = gfx_v9_4_3_ip_print,
4632 };
4633 
4634 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4635 	.type = AMDGPU_RING_TYPE_COMPUTE,
4636 	.align_mask = 0xff,
4637 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4638 	.support_64bit_ptrs = true,
4639 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4640 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4641 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4642 	.emit_frame_size =
4643 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4644 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4645 		5 + /* hdp invalidate */
4646 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4647 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4648 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4649 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4650 		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4651 		7 + /* gfx_v9_4_3_emit_mem_sync */
4652 		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4653 		15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4654 		2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
4655 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4656 	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4657 	.emit_fence = gfx_v9_4_3_ring_emit_fence,
4658 	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4659 	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4660 	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4661 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4662 	.test_ring = gfx_v9_4_3_ring_test_ring,
4663 	.test_ib = gfx_v9_4_3_ring_test_ib,
4664 	.insert_nop = gfx_v9_4_3_ring_insert_nop,
4665 	.pad_ib = amdgpu_ring_generic_pad_ib,
4666 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4667 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4668 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4669 	.soft_recovery = gfx_v9_4_3_ring_soft_recovery,
4670 	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4671 	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4672 	.reset = gfx_v9_4_3_reset_kcq,
4673 	.emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
4674 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
4675 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
4676 };
4677 
4678 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4679 	.type = AMDGPU_RING_TYPE_KIQ,
4680 	.align_mask = 0xff,
4681 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4682 	.support_64bit_ptrs = true,
4683 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4684 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4685 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4686 	.emit_frame_size =
4687 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4688 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4689 		5 + /* hdp invalidate */
4690 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4691 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4692 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4693 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4694 		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4695 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4696 	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4697 	.test_ring = gfx_v9_4_3_ring_test_ring,
4698 	.insert_nop = amdgpu_ring_insert_nop,
4699 	.pad_ib = amdgpu_ring_generic_pad_ib,
4700 	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4701 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4702 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4703 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4704 };
4705 
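/*
 * Compute rings are stored contiguously per XCC, so the ring for
 * (xcc i, ring j) lives at index j + i * num_compute_rings.
 */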
4706 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4707 {
4708 	int i, j, num_xcc;
4709 
4710 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4711 	for (i = 0; i < num_xcc; i++) {
4712 		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4713 
4714 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
4715 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4716 					= &gfx_v9_4_3_ring_funcs_compute;
4717 	}
4718 }
4719 
4720 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4721 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4722 	.process = gfx_v9_4_3_eop_irq,
4723 };
4724 
4725 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4726 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4727 	.process = gfx_v9_4_3_priv_reg_irq,
4728 };
4729 
4730 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
4731 	.set = gfx_v9_4_3_set_bad_op_fault_state,
4732 	.process = gfx_v9_4_3_bad_op_irq,
4733 };
4734 
4735 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4736 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4737 	.process = gfx_v9_4_3_priv_inst_irq,
4738 };
4739 
4740 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4741 {
4742 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4743 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4744 
4745 	adev->gfx.priv_reg_irq.num_types = 1;
4746 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4747 
4748 	adev->gfx.bad_op_irq.num_types = 1;
4749 	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
4750 
4751 	adev->gfx.priv_inst_irq.num_types = 1;
4752 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4753 }
4754 
4755 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4756 {
4757 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4758 }
4759 
4761 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4762 {
4763 	/* init ASIC GDS info */
4764 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4765 	case IP_VERSION(9, 4, 3):
4766 	case IP_VERSION(9, 4, 4):
4767 		/* 9.4.3 removed all GDS internal memory; the kernel
4768 		 * only supports GWS opcodes such as barrier and
4769 		 * semaphore */
4770 		adev->gds.gds_size = 0;
4771 		break;
4772 	default:
4773 		adev->gds.gds_size = 0x10000;
4774 		break;
4775 	}
4776 
4777 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4778 	case IP_VERSION(9, 4, 3):
4779 	case IP_VERSION(9, 4, 4):
4780 		/* deprecated for 9.4.3; unused */
4781 		adev->gds.gds_compute_max_wave_id = 0;
4782 		break;
4783 	default:
4784 		/* this really depends on the chip */
4785 		adev->gds.gds_compute_max_wave_id = 0x7ff;
4786 		break;
4787 	}
4788 
4789 	adev->gds.gws_size = 64;
4790 	adev->gds.oa_size = 16;
4791 }
4792 
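/*
 * Mark the CUs in @bitmap inactive for the currently selected SE/SH by
 * programming GC_USER_SHADER_ARRAY_CONFIG.INACTIVE_CUS on the given XCC.
 */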
4793 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4794 						 u32 bitmap, int xcc_id)
4795 {
4796 	u32 data;
4797 
4798 	if (!bitmap)
4799 		return;
4800 
4801 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4802 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4803 
4804 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4805 }
4806 
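/*
 * A CU counts as active only if it is marked inactive neither by fuses
 * (CC_GC_SHADER_ARRAY_CONFIG) nor by the user override
 * (GC_USER_SHADER_ARRAY_CONFIG).
 */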
4807 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4808 {
4809 	u32 data, mask;
4810 
4811 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4812 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4813 
4814 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4815 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4816 
4817 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4818 
4819 	return (~data) & mask;
4820 }
4821 
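/*
 * Walk every SE/SH on every XCC: apply the CU disable masks parsed from
 * the amdgpu_disable_cu parameter, record active-CU bitmaps, and build
 * the always-on (AO) CU mask from the first 2x2 SE/SH block. When all
 * SEs of an XCC report the same CU count, CPC harvesting
 * relaunch/dispatch is disabled for that XCC.
 */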
4822 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4823 				 struct amdgpu_cu_info *cu_info)
4824 {
4825 	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
4826 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
4827 	unsigned disable_masks[4 * 4];
4828 	bool is_symmetric_cus;
4829 
4830 	if (!adev || !cu_info)
4831 		return -EINVAL;
4832 
4833 	/*
4834 	 * 16 is the 4 * 4 disable_masks array size, which covers all gfx9 ASICs
4835 	 */
4836 	if (adev->gfx.config.max_shader_engines *
4837 		adev->gfx.config.max_sh_per_se > 16)
4838 		return -EINVAL;
4839 
4840 	amdgpu_gfx_parse_disable_cu(disable_masks,
4841 				    adev->gfx.config.max_shader_engines,
4842 				    adev->gfx.config.max_sh_per_se);
4843 
4844 	mutex_lock(&adev->grbm_idx_mutex);
4845 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4846 		is_symmetric_cus = true;
4847 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4848 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4849 				mask = 1;
4850 				ao_bitmap = 0;
4851 				counter = 0;
4852 				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4853 				gfx_v9_4_3_set_user_cu_inactive_bitmap(
4854 					adev,
4855 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4856 					xcc_id);
4857 				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
4858 
4859 				cu_info->bitmap[xcc_id][i][j] = bitmap;
4860 
4861 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4862 					if (bitmap & mask) {
4863 						if (counter < adev->gfx.config.max_cu_per_sh)
4864 							ao_bitmap |= mask;
4865 						counter++;
4866 					}
4867 					mask <<= 1;
4868 				}
4869 				active_cu_number += counter;
4870 				if (i < 2 && j < 2)
4871 					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4872 				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4873 			}
4874 			if (i && is_symmetric_cus && prev_counter != counter)
4875 				is_symmetric_cus = false;
4876 			prev_counter = counter;
4877 		}
4878 		if (is_symmetric_cus) {
4879 			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
4880 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
4881 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
4882 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
4883 		}
4884 		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4885 					    xcc_id);
4886 	}
4887 	mutex_unlock(&adev->grbm_idx_mutex);
4888 
4889 	cu_info->number = active_cu_number;
4890 	cu_info->ao_cu_mask = ao_cu_mask;
4891 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4892 
4893 	return 0;
4894 }
4895 
4896 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
4897 	.type = AMD_IP_BLOCK_TYPE_GFX,
4898 	.major = 9,
4899 	.minor = 4,
4900 	.rev = 3,
4901 	.funcs = &gfx_v9_4_3_ip_funcs,
4902 };
4903 
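/*
 * Partition-scoped resume: (re)initialize constants, RLC and CP only on
 * the XCC instances selected in inst_mask. RLC bring-up is skipped under
 * SR-IOV.
 */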
4904 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
4905 {
4906 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4907 	uint32_t tmp_mask;
4908 	int i, r;
4909 
4910 	/* TODO: Initialize golden regs */
4911 	/* gfx_v9_4_3_init_golden_registers(adev); */
4912 
4913 	tmp_mask = inst_mask;
4914 	for_each_inst(i, tmp_mask)
4915 		gfx_v9_4_3_xcc_constants_init(adev, i);
4916 
4917 	if (!amdgpu_sriov_vf(adev)) {
4918 		tmp_mask = inst_mask;
4919 		for_each_inst(i, tmp_mask) {
4920 			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
4921 			if (r)
4922 				return r;
4923 		}
4924 	}
4925 
4926 	tmp_mask = inst_mask;
4927 	for_each_inst(i, tmp_mask) {
4928 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
4929 		if (r)
4930 			return r;
4931 	}
4932 
4933 	return 0;
4934 }
4935 
4936 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
4937 {
4938 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4939 	int i;
4940 
4941 	for_each_inst(i, inst_mask)
4942 		gfx_v9_4_3_xcc_fini(adev, i);
4943 
4944 	return 0;
4945 }
4946 
4947 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
4948 	.suspend = &gfx_v9_4_3_xcp_suspend,
4949 	.resume = &gfx_v9_4_3_xcp_resume
4950 };
4951 
4952 struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
4953 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
4954 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
4955 };
4956 
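/*
 * Register the GFX block with the RAS framework and bind its ACA handler;
 * on a bind failure, unwind the late init.
 */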
4957 static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
4958 {
4959 	int r;
4960 
4961 	r = amdgpu_ras_block_late_init(adev, ras_block);
4962 	if (r)
4963 		return r;
4964 
4965 	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
4966 				&gfx_v9_4_3_aca_info,
4967 				NULL);
4968 	if (r)
4969 		goto late_fini;
4970 
4971 	return 0;
4972 
4973 late_fini:
4974 	amdgpu_ras_block_late_fini(adev, ras_block);
4975 
4976 	return r;
4977 }
4978 
4979 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
4980 	.ras_block = {
4981 		.hw_ops = &gfx_v9_4_3_ras_ops,
4982 		.ras_late_init = &gfx_v9_4_3_ras_late_init,
4983 	},
4984 	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
4985 };
4986